diff --git "a/1033.jsonl" "b/1033.jsonl" new file mode 100644--- /dev/null +++ "b/1033.jsonl" @@ -0,0 +1,434 @@ +{"seq_id": "514099463", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sqlite3\n\n# 连接到数据库后,需要打开游标,称之为Cursor,通过Cursor执行SQL语句,然后,获得执行结果。\n\n# 连接到SQLite数据库\n# 数据库文件是test.db\n# 如果文件不存在,会自动在当前目录创建:\n\nconn = sqlite3.connect('test.db')\n\n# 创建一个Cursor:\ncursor = conn.cursor()\n\n# 执行一条SQL语句,创建user表:\ncursor.execute('create table user (id varchar(20) primary key, name varchar(20))')\n\n# 继续执行一条SQL语句,插入一条记录:\ncursor.execute(\"insert into user (id, name) values ('1', 'Michael')\")\n\n# 通过rowcount获得插入的行数:\nprint(cursor.rowcount)\n\n# 关闭Cursor:\ncursor.close()\n\n# 提交事务:\nconn.commit()\n\n# 关闭Connection:\nconn.close()\n\n# 使用Python的DB-API时,只要搞清楚Connection和Cursor对象,打开后一定记得关闭,就可以放心地使用。\n\n# 使用Cursor对象执行insert,update,delete语句时,执行结果由rowcount返回影响的行数,就可以拿到执行结果。\n\n# 使用Cursor对象执行select语句时,通过featchall()可以拿到结果集。结果集是一个list,每个元素都是一个tuple,对应一行记录。\n\n# 如果SQL语句带有参数,那么需要把参数按照位置传递给execute()方法,有几个?占位符就必须对应几个参数,例如:\n\n# cursor.execute('select * from user where name=? and pwd=?', ('abc', 'password'))\n# SQLite支持常见的标准SQL语句以及几种常见的数据类型。具体文档请参阅SQLite官方网站。\n", "sub_path": "04_python/18_访问数据库/01_SQLite.py", "file_name": "01_SQLite.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sqlite3.connect", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "386456747", "text": "import os\nfrom io import BytesIO\n\nimport PIL\nfrom flask import request, send_file\nfrom sqlalchemy.orm import load_only\n\nfrom project import app, img_path\nfrom project.models import Image\nfrom project.utils import make_dir\n\n\n@app.route(\"/image/\")\n@app.route(\"/image//\")\ndef image(id, hash=None):\n image = Image.query.options(\n load_only(Image.id, Image.encoding_format, Image.updated_at)\n ).get_or_404(id)\n\n # Dimensions\n width = 500\n height = 500\n\n if \"s\" in request.args:\n width = int(request.args[\"s\"])\n height = width\n\n # Generate file name\n extension = image.encoding_format.split(\"/\")[-1] if image.encoding_format else \"png\"\n hash = image.get_hash()\n file_path = os.path.join(img_path, f\"{id}-{hash}-{width}-{height}.{extension}\")\n\n # Load from disk if exists\n if os.path.exists(file_path):\n return send_file(file_path)\n\n # Save from database to disk\n make_dir(img_path)\n img = PIL.Image.open(BytesIO(image.data))\n img.thumbnail((width, height), PIL.Image.ANTIALIAS)\n img.save(file_path)\n\n # Load from disk\n return send_file(file_path)\n", "sub_path": "project/views/image.py", "file_name": "image.py", "file_ext": "py", "file_size_in_byte": 1148, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "project.models.Image.query.options", "line_number": 16, "usage_type": "call"}, {"api_name": "project.models.Image.query", "line_number": 16, "usage_type": "attribute"}, {"api_name": "project.models.Image", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.load_only", "line_number": 17, "usage_type": "call"}, {"api_name": "project.models.Image.id", "line_number": 17, "usage_type": "attribute"}, {"api_name": "project.models.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "project.models.Image.encoding_format", "line_number": 17, "usage_type": "attribute"}, {"api_name": "project.models.Image.updated_at", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"flask.request.args", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "project.img_path", "line_number": 31, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.send_file", "line_number": 35, "usage_type": "call"}, {"api_name": "project.utils.make_dir", "line_number": 38, "usage_type": "call"}, {"api_name": "project.img_path", "line_number": 38, "usage_type": "argument"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.send_file", "line_number": 44, "usage_type": "call"}, {"api_name": "project.app.route", "line_number": 13, "usage_type": "call"}, {"api_name": "project.app", "line_number": 13, "usage_type": "name"}, {"api_name": "project.app.route", "line_number": 14, "usage_type": "call"}, {"api_name": "project.app", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "502047319", "text": "\"\"\"\nModule containing functions allowing to visualize states passed through NN which is used.\n\"\"\"\n\nimport os\nimport numpy as np\nimport pickle\nimport pandas as pd\nfrom collections import defaultdict\nimport matplotlib as mtl\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold, preprocessing\nfrom sklearn import decomposition\n\nfrom ai_challenge.utils import get_results_path\n\n\ndef traj_2_array(traj_dict, feature_nm):\n data = []\n traj_ind = []\n for traj_no, traj in traj_dict.items():\n for step, values_dict in traj.items():\n value = values_dict[feature_nm]\n data.append(value)\n traj_ind.append(traj_no)\n return np.array(data), np.array(traj_ind)\n\n\ndef reconstruct_traj(data, traj_ind):\n traj_dict = defaultdict(list)\n for index, step in zip(traj_ind, data):\n traj_dict[index].append(step)\n return traj_dict\n\n\ndef fit_dim_red(traj_dict_fn, n_comp, feature_nm, opponent_type_fn=None):\n with open(os.path.join(get_results_path(), traj_dict_fn), 'rb') as handle:\n traj_dict = pickle.load(handle)\n\n opponent_type = []\n if opponent_type_fn is not None:\n with open(os.path.join(get_results_path(), opponent_type_fn), 'rb') as handle:\n opponent_type = pd.read_csv(handle)['type']\n opponent_type = [1 if opp == 'FocusedAgent' else 0 for opp in list(opponent_type)]\n data, traj_ind = traj_2_array(traj_dict, feature_nm)\n data_scaled = data[:, 0, :]\n models = ['TSNE', 'Isomap', 'PCA']\n\n for dim_red in models:\n fig = plt.figure()\n print('Fitting: ', dim_red)\n if hasattr(manifold, dim_red):\n dim_red_model = getattr(manifold, dim_red)(n_components=n_comp)\n elif hasattr(decomposition, dim_red):\n dim_red_model = getattr(decomposition, dim_red)(n_components=n_comp)\n else:\n raise AttributeError(\n 'Specified dimensionality reduction not found '\n 'in sklearn.mainfold or sklearn.decomposition.')\n trans_data = dim_red_model.fit_transform(data_scaled)\n trans_traj_data = reconstruct_traj(trans_data, 
traj_ind)\n\n for index, traj in trans_traj_data.items():\n point_type = '.b'\n if opponent_type_fn is not None:\n opp_typ = opponent_type[index]\n point_type = '.r' if int(opp_typ) == 1 else '.b'\n for step, point in enumerate(traj):\n plt.plot(point[0], point[1], point_type, markersize=1)\n fig.suptitle(dim_red)\n\n path, f_name = os.path.split(traj_dict_fn)\n plt.savefig(os.path.join(get_results_path(), path, feature_nm + dim_red + '_dim_red_plot.png'))\n", "sub_path": "ai_challenge/visualization/dim_reduction.py", "file_name": "dim_reduction.py", "file_ext": "py", "file_size_in_byte": 2687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ai_challenge.utils.get_results_path", "line_number": 37, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "ai_challenge.utils.get_results_path", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "sklearn.manifold", "line_number": 52, "usage_type": "argument"}, {"api_name": "sklearn.manifold", "line_number": 53, "usage_type": "argument"}, {"api_name": "sklearn.decomposition", "line_number": 54, "usage_type": "argument"}, {"api_name": "sklearn.decomposition", "line_number": 55, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ai_challenge.utils.get_results_path", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "378564322", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains the functions needed to call the functions of the previous modules\nand to configure and launch a run of an evolutionary algorithm that reaches the\noptimal solution to the knapsack problem.\n\n@author: bbaruque\n\"\"\"\n\nfrom deap import base, tools\nfrom deap import algorithms\n\nimport evol_simple.ConfiguracionSolucion\nimport evol_simple.Evaluacion\n\n#%% The configuration of the genetic algorithm is defined\ndef configuracionAlgoritmo(toolbox): \n # Standard procedures are selected for crossover, mutation and selection\n\ttoolbox.register(\"mate\", tools.cxOnePoint)\n\ttoolbox.register(\"mutate\", tools.mutFlipBit, indpb=0.2)\n\ttoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\t# Define how each individual will be evaluated\n\t# In this case, we use the 
evaluation function defined in the Evaluacion.py module\n\ttoolbox.register(\"evaluate\", evol_simple.Evaluacion.evalKnapsack)\n\n#%% Define how the evolutionary search for the solution is carried out\ndef realizaEvolucion(toolbox, stats):\n\n # Configure how each individual is defined. See the corresponding file\n evol_simple.ConfiguracionSolucion.configuraPoblacion(toolbox)\n\n configuracionAlgoritmo(toolbox)\n\n # Initialize the population with 300 individuals\n population = toolbox.population(n=300)\n\n # Call the algorithm that drives the evolution of the solutions\n population, logbook = algorithms.eaSimple(population, toolbox, \n\t cxpb=0.5, mutpb=0.2, # Crossover and mutation probabilities\n\t ngen=20, verbose=False, stats=stats) # Number of generations to complete and statistics to collect\n\n # For each generation, the logbook structure stores a summary of the\n # algorithm's progress.\n print(\"The result of the evolution is: \")\n print(logbook)\n\n # Check which is the best solution found by evolution\n print(\"The best solution found is: \")\n print(tools.selBest(population,1)[0])\n \n return logbook\n\nif __name__ == \"__main__\":\n # Toolbox that stores the configuration of the run\n toolbox = base.Toolbox()\n realizaEvolucion(toolbox,[])", "sub_path": "Ejemplos/evol_simple/CicloEvolutivo.py", "file_name": "CicloEvolutivo.py", "file_ext": "py", "file_size_in_byte": 2240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "deap.tools.cxOnePoint", "line_number": 19, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 19, "usage_type": "name"}, {"api_name": "deap.tools.mutFlipBit", "line_number": 20, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 20, "usage_type": "name"}, {"api_name": "deap.tools.selTournament", "line_number": 21, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 21, "usage_type": "name"}, {"api_name": "evol_simple.ConfiguracionSolucion.Evaluacion", "line_number": 24, "usage_type": "attribute"}, {"api_name": "evol_simple.ConfiguracionSolucion", "line_number": 24, "usage_type": "name"}, {"api_name": "evol_simple.ConfiguracionSolucion.ConfiguracionSolucion.configuraPoblacion", "line_number": 30, "usage_type": "call"}, {"api_name": "evol_simple.ConfiguracionSolucion.ConfiguracionSolucion", "line_number": 30, "usage_type": "attribute"}, {"api_name": "evol_simple.ConfiguracionSolucion", "line_number": 30, "usage_type": "name"}, {"api_name": "deap.algorithms.eaSimple", "line_number": 38, "usage_type": "call"}, {"api_name": "deap.algorithms", "line_number": 38, "usage_type": "name"}, {"api_name": "deap.tools.selBest", "line_number": 49, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 49, "usage_type": "name"}, {"api_name": "deap.base.Toolbox", "line_number": 55, "usage_type": "call"}, {"api_name": "deap.base", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "127746919", "text": "# ===================================================================== #\n# APP : urls #\n# ===================================================================== #\n\n\nfrom django.urls import path, include\n#\nfrom django.views.generic.base import TemplateView as TV\nfrom django.views.generic.base import RedirectView as RV\n#\nfrom django.views.generic.list import ListView as LV # list\nfrom django.views.generic.detail import DetailView as SV # show\n#\nfrom 
django.views.generic.edit import FormView as FV # form\nfrom django.views.generic.edit import CreateView as CV # create\nfrom django.views.generic.edit import UpdateView as UV # update\nfrom django.views.generic.edit import DeleteView as DV # delete\n\nfrom . import views\n\n\n\n# ===================================================================== #\n# ROUTES #\n# ===================================================================== #\nurlpatterns = [\n\n# ================================= #\n# GENERAL #\n# ================================= #\n path('',\n views.demo_home,\n name='demo_home'),\n\n path('about/',\n TV.as_view(template_name='demo/demo_about.html') ,\n name='demo_about'),\n\n path('gg/',\n RV.as_view(url='https://www.google.fr',query_string=True),\n name='ask_google' ),\n\n# ================================= #\n# CATEGORY #\n# ================================= #\n path('category/',\n views.CategoryListView.as_view(),\n name='demo_category_list'),\n\n path('category/<int:pk>',\n views.CategoryShowView.as_view(),\n name='demo_category_show'),\n\n path('category/new',\n views.CategoryCreateView.as_view(),\n name='demo_category_create'),\n\n path('category/<int:pk>/edit',\n views.CategoryEditView.as_view(),\n name='demo_category_edit'),\n\n path('category/<int:pk>/delete',\n views.CategoryDeleteView.as_view(),\n name='demo_category_delete'),\n\n\n# ================================= #\n# TAGLIB #\n# ================================= #\n path('taglib/',\n views.demo_taglib,\n name='demo_taglib'),\n\n# ================================= #\n# MESSAGES #\n# ================================= #\n path('messages/',\n views.demo_messages,\n name='demo_messages'),\n# ================================= #\n# SECURITY #\n# ================================= #\n\n path('loginrequired/',\n views.demo_login_required,\n name='demo_login_required'),\n\n path('permissionrequired/',\n views.demo_permission_required,\n name='demo_permission_required'),\n\n]\n\n\n\n\n\"\"\"\nFORMS \n /category/ => list\n /category/new => create \n /category/{id} => show\n /category/{id}/edit => edit\n /category/{id}/delete => delete ( ask for confirm )\n \n /category/{id}/item/ => list ( with category )\n /category/{id}/item/{id} => redirect ( to /item/{id} )\n /category/{id}/item/new => create ( with category )\n\"\"\"\n\n\n\n\n\n# ********************************************************************* #\n# DEMO #\n# ********************************************************************* #\n\"\"\"\nurlpatterns = [\n path('hello/', views.hello, name='demo_hello'),\n path('hi/', views.hi, name='demo_hi_unknown'),\n path('hi/<name>', views.hi, name='demo_hi_named'),\n path('bob', views.bob, name='demo_bob'),\n path('gg', views.gg, name='demo_gg'),\n path('welcome/', views.welcome, name='demo_welcome'),\n path('greetings/', views.greetings, name='demo_greetings'),\n path('time', views.time, name='demo_time'),\n path('extended', views.extended, name='demo_extended_template'),\n path('assets', views.assets, name='demo_static_files'),\n path('categories/', views.categories, name='demo_category_list'),\n path('categories/create', views.category_create, name='demo_category_create'),\n path('categories/<int:id>', views.category_show, name='demo_category_show'),\n path('items/create', views.item_create, name='demo_item_create'),\n]\n\"\"\"\n", "sub_path": "demo/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 4510, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": 
"django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.views.generic.base.TemplateView.as_view", "line_number": 36, "usage_type": "call"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "django.views.generic.base.RedirectView.as_view", "line_number": 40, "usage_type": "call"}, {"api_name": "django.views.generic.base.RedirectView", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 58, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 62, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 70, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 77, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 84, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "445243542", "text": "import re\n\nimport scrapy\nimport datetime\nfrom tutorial.items import KisssubItem\nfrom bs4 import BeautifulSoup\nfrom re import search, findall\n\n\nclass KisssubSpider(scrapy.Spider):\n name = 'kisssub'\n allowed_domains = ['www.kisssub.org']\n start_urls = ['http://www.kisssub.org/search.php?keyword=%E6%AD%BB%E7%A5%9E%E5%B0%91%E7%88%B7%E4%B8%8E%E9%BB%91%E5%A5%B3%E4%BB%86']\n\n def parse(self, response):\n content = response.css(\"#data_list\").extract_first()\n td_list = BeautifulSoup(content, \"html.parser\").select(\"td\")\n for one in range(50):\n kiss = KisssubItem()\n published_date = td_list[0 + one * 8].text\n if '今天' in published_date:\n published_date = str(datetime.date.today()).replace('-', '/')\n elif '昨天' in published_date:\n published_date = str(datetime.date.today() - datetime.timedelta(days=1)).replace('-', '/')\n elif '前天' in published_date:\n published_date = str(datetime.date.today() - datetime.timedelta(days=2)).replace('-', '/')\n kiss['published_time'] = published_date\n kiss['_type'] = td_list[1 + one * 8].text\n kiss['title'] = td_list[2 + one * 8].text\n kiss['href'] = \"http://www.kisssub.org/\" + search(\"href=\\\"(.+?)\\\"\", str(td_list[2 + one * 8]))[1]\n # magnet_request = scrapy.Request(url=kiss['href'], callback=self.parse2) # 2021/8/14改进, 直接从href中取\n # magnet_request.meta['kiss'] = kiss  # 同上\n kiss['magnet_link'] = re.search(\"show-(.+?).html\", kiss['href'])[1] # kiss['magnet_link'] = magnet_request\n kiss['content_length'] = td_list[3 + one * 8].text\n kiss['seed'] = td_list[4 + one * 8].text\n kiss['download_times'] = td_list[5 + one * 8].text\n kiss['complete_times'] = td_list[6 + one * 8].text\n kiss['author'] = td_list[7 + one * 8].text\n yield kiss # magnet_request\n # try:\n # next_ = response.css(\"a.nextprev::attr('href')\").extract()[0]\n #\n # except IndexError:\n # # self.crawler.engine.close_spider(self, '\\n\\n\\n\\n\\n')\n # try:\n # next_ = response.css(\"a.nextprev::attr('href')\").extract()[1]\n # except IndexError:\n # self.crawler.engine.close_spider(self, \"\\n\")\n # url = response.urljoin(next_)\n\n href_list = response.css(\".nextprev\").getall()\n for href in href_list:\n if \"〉\" in href:\n try:\n url = 
re.search(\"href=\\\"(.+?)\\\"\", href)[1]\n url = url.replace(\"&\", \"&\")\n url = response.urljoin(url)\n # with open(\"record.txt\", 'w', encoding='utf-8') as file:\n # file.write(url)\n yield scrapy.Request(url=url, callback=self.parse)\n except IndexError:\n self.crawler.engine.close_spier(self, '\\n')\n\n\n\n\n\n def parse2(self, response):\n kiss = response.meta['kiss']\n kiss['magnet_link'] = findall(\"\\w+\", response.css(\"#text_hash_id\").extract_first())[4]\n yield kiss\n", "sub_path": "tutorial/tutorial/spiders/kisssub.py", "file_name": "kisssub.py", "file_ext": "py", "file_size_in_byte": 3213, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "scrapy.Spider", "line_number": 10, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 17, "usage_type": "call"}, {"api_name": "tutorial.items.KisssubItem", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 26, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 30, "usage_type": "call"}, {"api_name": "re.search", "line_number": 33, "usage_type": "call"}, {"api_name": "re.search", "line_number": 55, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 60, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "240934999", "text": "# coding: utf-8\nfrom flask import Blueprint, render_template, redirect, url_for, g, request\nfrom ..utils.permissions import UserPermission\nfrom ..forms.piece import TextForm, ImageForm\nfrom ..models import db, Piece\n\nbp = Blueprint('piece', __name__)\n\n\n@bp.route('/piece/add')\n@UserPermission()\ndef add():\n \"\"\"添加条目\"\"\"\n return render_template('piece/add.html')\n\n\n@bp.route('/piece/add_text', methods=['GET', 'POST'])\n@UserPermission()\ndef add_text():\n \"\"\"添加文字类条目\"\"\"\n channel_id = request.args.get('channel_id', type=int)\n form = TextForm()\n if channel_id:\n form.channel_id.data = channel_id\n form.channel_id.choices = [(c.id, c.name) for c in g.user.channels]\n if form.validate_on_submit():\n piece = Piece(title=form.title.data, content=form.content.data, channel_id=form.channel_id.data,\n user_id=g.user.id, kind='TEXT')\n db.session.add(piece)\n db.session.commit()\n return redirect(url_for('channel.view', uid=piece.channel_id))\n return render_template('piece/add_text.html', form=form)\n\n\n@bp.route('/piece/edit_text')\ndef edit_text():\n \"\"\"编辑文字类条目\"\"\"\n return render_template('piece/edit_text.html')\n\n\n@bp.route('/piece/add_image', methods=['GET', 'POST'])\n@UserPermission()\ndef add_image():\n \"\"\"添加图片\"\"\"\n channel_id = request.args.get('channel_id', type=int)\n form = ImageForm()\n if channel_id:\n form.channel_id.data = channel_id\n form.channel_id.choices = [(c.id, c.name) for c in g.user.channels]\n if form.validate_on_submit():\n piece = Piece(image=form.image.data, desc=form.desc.data, 
channel_id=form.channel_id.data, user_id=g.user.id,\n kind='IMAGE')\n db.session.add(piece)\n db.session.commit()\n return redirect(url_for('channel.view', uid=piece.channel_id))\n return render_template('piece/add_image.html', form=form)\n\n\n@bp.route('/piece/edit_image')\ndef edit_image():\n return render_template('piece/edit_image.html')\n\n\n@bp.route('/piece/<int:uid>')\ndef view(uid):\n \"\"\"Show a single piece\"\"\"\n piece = Piece.query.get_or_404(uid)\n return render_template('piece/view.html', piece=piece)\n\n\n@bp.route('/piece/<int:uid>/edit', methods=['GET', 'POST'])\n@UserPermission()\ndef edit(uid):\n \"\"\"Edit a piece\"\"\"\n piece = Piece.query.get_or_404(uid)\n if piece.kind == 'TEXT':\n form = TextForm(obj=piece)\n form.channel_id.choices = [(c.id, c.name) for c in g.user.channels]\n if form.validate_on_submit():\n piece.title = form.title.data\n piece.content = form.content.data\n piece.channel_id = form.channel_id.data\n db.session.add(piece)\n db.session.commit()\n return redirect(url_for('.view', uid=uid))\n return render_template('piece/edit_text.html', form=form)\n\n elif piece.kind == 'IMAGE':\n form = ImageForm(obj=piece)\n form.channel_id.choices = [(c.id, c.name) for c in g.user.channels]\n if form.validate_on_submit():\n piece.image = form.image.data\n piece.desc = form.desc.data\n piece.channel_id = form.channel_id.data\n db.session.add(piece)\n db.session.commit()\n return redirect(url_for('.view', uid=uid))\n return render_template('piece/edit_image.html', form=form)\n", "sub_path": "application/controllers/piece.py", "file_name": "piece.py", "file_ext": "py", "file_size_in_byte": 3372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.permissions.UserPermission", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "forms.piece.TextForm", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 25, "usage_type": "name"}, {"api_name": "models.Piece", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 28, "usage_type": "name"}, {"api_name": "models.db.session.add", "line_number": 29, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 29, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 30, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.permissions.UserPermission", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 45, 
"usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "forms.piece.ImageForm", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 49, "usage_type": "name"}, {"api_name": "models.Piece", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 51, "usage_type": "name"}, {"api_name": "models.db.session.add", "line_number": 53, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 53, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 54, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.permissions.UserPermission", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Piece.query.get_or_404", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Piece.query", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Piece", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Piece.query.get_or_404", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Piece.query", "line_number": 75, "usage_type": "attribute"}, {"api_name": "models.Piece", "line_number": 75, "usage_type": "name"}, {"api_name": "forms.piece.TextForm", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 78, "usage_type": "name"}, {"api_name": "models.db.session.add", "line_number": 83, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 83, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 84, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 86, "usage_type": "call"}, {"api_name": "forms.piece.ImageForm", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 90, "usage_type": "name"}, {"api_name": "models.db.session.add", "line_number": 95, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 95, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 96, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 96, "usage_type": "attribute"}, 
{"api_name": "models.db", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.permissions.UserPermission", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "628540687", "text": "#coding:utf-8\nimport pymysql\nimport re,requests\nfrom bs4 import BeautifulSoup\n\n#定义数据库连接\ndb= pymysql.connect(host=\"localhost\",user=\"root\", \n password=\"root\",db=\"yu\",port=3306,charset=\"utf8\") \n#用来伪装成浏览器访问\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'}\n#正则表达式匹配\npat='该域名未被注册或被隐藏'\n#打开��标\ncur = db.cursor() \n#新建数据表\ncreate_sql=\"\"\"\nCREATE TABLE IF NOT EXISTS `cihui_data` (\n `cihui_data` varchar(100) DEFAULT NULL,\n `update_time` datetime DEFAULT CURRENT_TIMESTAMP,\n `com_flag` varchar(6) DEFAULT NULL,\n `cn_flag` varchar(6) DEFAULT NULL,\n `reason` varchar(6) DEFAULT NULL\n)\n\"\"\"\ncur.execute(create_sql)\n# 新建检查表\ncheck_sql=\"\"\"CREATE TABLE IF NOT EXISTS `cihui_check` (\n `cihui_flag` varchar(100) DEFAULT NULL,\n `update_time` datetime DEFAULT CURRENT_TIMESTAMP\n)\"\"\"\n\ncur.execute(check_sql)\n#检查第一步插入是否已经完成\ncheck_sql_sel='select count(1) from cihui_check where cihui_flag=1'\ncur.execute(check_sql_sel)\ncheck_data = cur.fetchone()\nif check_data[0] !=1:\n #因为3位数的域名基本没了,所以从四位开始\n list=['0','1','2','3','4','5','6','7','8','9']\n for a in list:\n for b in list:\n for c in list:\n for d in list:\n sql_insert =\"\"\"insert into cihui_data(cihui_data) values('%s')\"\"\" % (a+b+c+d)\n cur.execute(sql_insert) \n print(a+b+c+d)\n \n for a in list:\n for b in list:\n for c in list:\n for d in list:\n for e in list:\n sql_insert =\"\"\"insert into cihui_data(cihui_data) values('%s')\"\"\" % (a+b+c+d+e)\n cur.execute(sql_insert) \n print(a+b+c+d+e)\n \n for a in list:\n for b in list:\n for c in list:\n for d in list:\n for e in list:\n for f in list:\n sql_insert =\"\"\"insert into cihui_data(cihui_data) values('%s')\"\"\" % (a+b+c+d+e+f)\n cur.execute(sql_insert) \n print(a+b+c+d+e+f)\n sql_check='insert into cihui_check(cihui_flag) values(%s)'% 1\n cur.execute(sql_check)\n db.commit()\ncur.execute(\"SELECT cihui_data from cihui_data where com_flag is null\")\ndata = cur.fetchall()\n\nprint(\"开始处理域名~\")\nfor i in data:\n try:\n url='http://whois.chinaz.com/%s.com' % i[0]\n cont=requests.get(url,headers=headers)\n soup=BeautifulSoup(cont.text,'lxml')\n if re.search(pat, soup.find(\"div\", class_=\"IcpMain02\").text):\n sql = \"UPDATE cihui_data SET com_flag =1 WHERE cihui_data = '%s'\" % i[0]\n cur.execute(sql)\n db.commit()\n print(i)\n else:\n sql = \"UPDATE cihui_data SET com_flag =0 WHERE cihui_data = '%s'\" % i[0]\n cur.execute(sql)\n db.commit()\n print(i)\n except:\n sql = \"UPDATE cihui_data SET reason ='error' WHERE cihui_data = '%s'\" % i[0]\n cur.execute(sql)\n db.commit()\n print('error~')\n continue\nprint(\"完成域名处理~\")\ndb.close()\n", "sub_path": "reduce.py", "file_name": "reduce.py", "file_ext": "py", "file_size_in_byte": 3304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pymysql.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 76, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 77, "usage_type": "call"}, {"api_name": 
"re.search", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "509163821", "text": "\"\"\"\nUtilities for Otter Grade\n\"\"\"\n\nimport tempfile\nimport tarfile\nimport os\nimport docker\nimport pandas as pd\n\nfrom contextlib import contextmanager\nfrom hashlib import md5\n\nOTTER_DOCKER_IMAGE_TAG = \"otter-grade\"\n\n@contextmanager\ndef simple_tar(path):\n \"\"\"\n Context manager that takes a file at ``path`` and creates a temporary tar archive from which the \n bytes in the file can be read. Yields the file object with the pointer set to the beginning of the\n file. Used for adding files to Docker containers through the Docker Python SDK. Closes and deletes\n the temporary file after the context is closed.\n\n Args:\n path (``str``): path to the desired file\n\n Yields:\n ``tempfile.NamedTemporaryFile``: the file with the tar archive written to it\n \"\"\"\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode=\"w\", fileobj=f)\n\n path = os.path.abspath(path)\n t.add(path, arcname=os.path.basename(path))\n\n t.close()\n f.seek(0)\n\n yield f\n\n f.close()\n\n@contextmanager\ndef get_container_file(container, path):\n \"\"\"\n Retrieves a file at ``path`` from a Docker container ``container``. Reads the bytes of this file\n as a tar archive from the container and writes these bytes to a temporary file. Extracts the single\n member of the tar archive from the temporary file and writes the bytes to another temporary file.\n Yields the file object with the pointer set to the beginning of the file. Closes and deletes the\n temporary files after the context is closed.\n\n Args:\n container (``docker.models.Container``): the Docker container object\n path (``str``): the path to the file in the container\n\n Yields:\n ``tempfile.NamedTemporaryFile``: the open temporary file with the extracted contents\n \"\"\"\n tarf = tempfile.NamedTemporaryFile()\n f = tempfile.NamedTemporaryFile()\n\n bits, _ = container.get_archive(path)\n for chunk in bits:\n tarf.write(chunk)\n\n tarf.seek(0)\n\n tar = tarfile.open(mode=\"r\", fileobj=tarf)\n\n members = tar.getmembers()\n assert len(members) == 1, \"Too many members to extract from container\"\n file_contents = tar.extractfile(members[0])\n \n f.write(file_contents.read())\n tar.close()\n tarf.close()\n \n f.seek(0)\n\n yield f\n\n f.close()\n\ndef list_files(path):\n \"\"\"\n Returns a list of all non-hidden files in a directory\n \n Args:\n path (``str``): path to a directory\n \n Returns:\n ``list`` of ``str``: list of filenames in the given directory\n\n \"\"\"\n return [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file)) and file[0] != \".\"]\n\ndef merge_csv(dataframes):\n \"\"\"\n Merges dataframes along the vertical axis\n \n Args:\n dataframes (``list`` of ``pandas.core.frame.DataFrame``): list of dataframes with same columns\n \n Returns:\n ``pandas.core.frame.DataFrame``: A merged dataframe resulting from 'stacking' all input dataframes\n\n \"\"\"\n final_dataframe = pd.concat(dataframes, axis=0, join='inner').sort_index()\n return final_dataframe\n\ndef prune_images():\n \"\"\"\n Prunes all Docker images named ``otter-grade``\n \"\"\"\n # this is a fix for travis -- allows overriding docker client version\n if os.environ.get(\"OTTER_DOCKER_CLIENT_VERSION\") is not None:\n client = docker.from_env(version=os.environ.get(\"OTTER_DOCKER_CLIENT_VERSION\"))\n else:\n client = docker.from_env()\n \n images = client.images.list()\n\n for img in images:\n if any([OTTER_DOCKER_IMAGE_TAG in t for t in 
img.tags]):\n client.images.remove(img.tags[0], force=True)\n\ndef generate_hash(path):\n \"\"\"\n Reads in a file and returns an MD5 hash of its contents.\n\n Args:\n path (``str``): path to the file that will be read in and hashed\n\n Returns:\n ``str``: the hash value of the file\n \"\"\"\n zip_hash = \"\"\n m = md5()\n with open(path, \"rb\") as f:\n data = f.read() # read file in chunk and call update on each chunk if file is large.\n m.update(data)\n zip_hash = m.hexdigest()\n return zip_hash\n", "sub_path": "otter/grade/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tempfile.NamedTemporaryFile", "line_number": 30, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 16, "usage_type": "name"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 59, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 60, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 68, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 43, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 108, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 116, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 116, "usage_type": "attribute"}, {"api_name": "docker.from_env", "line_number": 117, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 117, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 117, "usage_type": "attribute"}, {"api_name": "docker.from_env", "line_number": 119, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "236687191", "text": "from django.conf.urls import url\n\nfrom . 
import views\n\n\napp_name = 'profile'\nurlpatterns = [\n url(r'^player/(?P<pk>[0-9]+)/$', views.PlayerProfileView.as_view(),\n name='player-profile'),\n url(r'^height/create/$', views.HeightCreateView.as_view(),\n name='height-create'),\n url(r'^weight/create/$', views.WeightCreateView.as_view(),\n name='weight-create'),\n url(r'^parents-height/create/$', views.ParentsHeightCreateView.as_view(),\n name='parents-height-create'),\n url(r'^sitting-height/create/$', views.SittingHeightCreateView.as_view(),\n name='sitting-height-create'),\n url(r'^body-fat/create/$', views.BodyFatCreateView.as_view(),\n name='body-fat-create'),\n url(r'^height/(?P<pk>[0-9]+)/$', views.HeightView.as_view(),\n name='height'),\n url(r'^weight/(?P<pk>[0-9]+)/$', views.WeightView.as_view(),\n name='weight'),\n url(r'^parents-height/(?P<pk>[0-9]+)/$', views.ParentsHeightView.as_view(),\n name='parents-height'),\n url(r'^sitting-height/(?P<pk>[0-9]+)/$', views.SittingHeightView.as_view(),\n name='sitting-height'),\n url(r'^height/list/$', views.HeightListView.as_view(),\n name='height-list'),\n url(r'^weight/list/$', views.WeightListView.as_view(),\n name='weight-list'),\n url(r'^sitting-height/list/$', views.SittingHeightListView.as_view(),\n name='sitting-height-list'),\n url(r'^parents-height/list/$', views.ParentsHeightListView.as_view(),\n name='parents-height-list'),\n]\n", "sub_path": "apps/profile/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "367924094", "text": "import sys\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask import Flask, render_template, jsonify, request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_marshmallow import Marshmallow\r\nimport requests\r\nimport os\r\nimport sys, socket\r\nimport datetime\r\nimport threading\r\nfrom datetime import datetime\r\nimport requests\r\nimport random\r\nimport string\r\nimport threading\r\nfrom marshmallow import Schema, fields\r\nimport json\r\nimport time\r\nimport socket\r\nimport os\r\n#import marshmallow_sqlalchemy\r\n\r\napp = Flask(__name__)\r\nbasedir = os.path.abspath(os.path.dirname(__file__))\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'order.sqlite')\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\ndb = 
SQLAlchemy(app) # defining the sqlite db\r\n#ma = Marshmallow(app)\r\n\r\n\r\n# defining various urls\r\nisLocal = False\r\ncatalog_url ='http://localhost'\r\n\r\n\r\n\r\nlog_lock = threading.Lock() # lock protecting the timing/performance log\r\n\r\n\r\n\r\n\r\nclass PurchaseRequest(db.Model):\r\n\r\n id = db.Column(db.String(16), primary_key=True) # unique id\r\n book_name = db.Column(db.String(16)) # name of the item\r\n item_number = db.Column(db.Integer, nullable=False) # item number\r\n total_price = db.Column(db.Float, nullable=False) # total price of the order\r\n remaining_stock = db.Column(db.Integer) # remaining stock\r\n date_created = db.Column(db.DateTime, default=datetime.utcnow()) # date and time of the order\r\n\r\n\r\nclass PurchaseRequestSchema(Schema):\r\n\r\n id = fields.Str(dump_only=True)\r\n book_name = fields.Str(dump_only=True)\r\n item_number = fields.Int()\r\n total_price = fields.Float()\r\n remaining_stock = fields.Int()\r\n date_created = fields.DateTime()\r\n\r\n\r\n\r\n\r\n@app.route('/buy/<int:args>', methods=['GET'])\r\ndef buy(args):\r\n\r\n # note the request start time\r\n request_start = datetime.now()\r\n request_id = request.values['request_id']\r\n\r\n\r\n # form the query url and get the result\r\n port = str(5001)\r\n query_url = catalog_url + ':' + port + '/lookup/' + str(args)\r\n hostname = socket.gethostname()\r\n ip = socket.gethostbyname(hostname)\r\n request_success = False\r\n while not request_success:\r\n try:\r\n query_result = requests.get(url=query_url, data={'request_id': request_id})\r\n query_data = query_result.json()\r\n\r\n # if the item is in stock\r\n if query_data is not None and query_data['result']['quantity'] > 0:\r\n\r\n # form the query url and get the result\r\n update_url = catalog_url + ':' + port + '/update/' + str(args)\r\n update_result = requests.get(url=update_url, data={'request_id': request_id})\r\n update_data = update_result.json()\r\n\r\n # if the item is in stock\r\n if update_data['result'] == 0:\r\n request_success = True\r\n # create a unique order id\r\n _id = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(16))\r\n\r\n # create an order db object and add to orders db\r\n purchase_request = PurchaseRequest(id=_id, book_name=query_data['result']['name'], item_number=args,\r\n total_price=query_data['result']['cost'],\r\n remaining_stock=update_data['remaining_stock'])\r\n db.session.add(purchase_request)\r\n db.session.commit()\r\n\r\n # get the newly created order details\r\n order_details = PurchaseRequest.query.filter_by(id=_id).first()\r\n order_schema = PurchaseRequestSchema()\r\n result = order_schema.dump(order_details)\r\n\r\n # note the request end time and calculate the difference\r\n request_end = datetime.now()\r\n request_time = request_end - request_start\r\n\r\n # acquire a lock on the file and write the time\r\n log_lock.acquire()\r\n\r\n print(request_time.microseconds / 1000)\r\n\r\n log_lock.release()\r\n\r\n # return the result\r\n return {'result': 'Buy Successful', 'data': result, 'catalog_host/ip':update_data['catalog_host/ip'],\r\n 'order_host/ip': hostname+'/'+ip}\r\n\r\n # if the item is not in stock\r\n else:\r\n # return failure\r\n return {'result': 'Buy Failed!',\r\n 'data': {'book_name': query_data['result']['name'], 'item_number': args, 'remaining_stock': 0},\r\n 'catalog_host/ip': update_data['catalog_host/ip'],\r\n 'order_host/ip': hostname + '/' + ip\r\n }\r\n # if the item is not in stock\r\n else:\r\n # return 
failure\r\n return {'result': 'Buy Failed!',\r\n 'data': {'book_name': query_data['result']['name'], 'item_number': args, 'remaining_stock': 0},\r\n 'catalog_host/ip': None,  # no catalog update was attempted in this branch, so there is no update_data\r\n 'order_host/ip': hostname + '/' + ip\r\n }\r\n\r\n except Exception:\r\n time.sleep(3)\r\n\r\n\r\n\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef start():\r\n return \"order server is working!\"\r\nif __name__ == '__main__':\r\n\r\n app.run(host='localhost', port=5003)", "sub_path": "orderServer.py", "file_name": "orderServer.py", "file_ext": "py", "file_size_in_byte": 5641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 30, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "marshmallow.Schema", "line_number": 55, "usage_type": "name"}, {"api_name": "marshmallow.fields.Str", "line_number": 57, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 57, "usage_type": "name"}, {"api_name": "marshmallow.fields.Str", "line_number": 58, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 58, "usage_type": "name"}, {"api_name": "marshmallow.fields.Int", "line_number": 59, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 59, "usage_type": "name"}, {"api_name": "marshmallow.fields.Float", "line_number": 60, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 60, "usage_type": "name"}, {"api_name": "marshmallow.fields.Int", "line_number": 61, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 61, "usage_type": "name"}, {"api_name": "marshmallow.fields.DateTime", "line_number": 62, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.request.values", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "socket.gethostname", "line_number": 78, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 83, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 98, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 98, "usage_type": "attribute"}, {"api_name": "string.ascii_lowercase", "line_number": 98, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 98, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 113, 
"usage_type": "name"}, {"api_name": "time.sleep", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "414099383", "text": "import os, sys, json, csv, random, re\nfrom datetime import datetime\n\ntas = []\nwith open(\"tas.json\") as f:\n raw = json.load(f)\n for ta in raw:\n tas.extend([ta] * ta['weight'])\nrandom.shuffle(tas)\n\ndef main():\n print(\"canvas.csv => roster.json\")\n\n # parse roster\n with open(\"roster.json\") as f:\n roster = json.load(f)\n\n roster_dict = {}\n for row in roster:\n net_id = row.get(\"net_id\", \"\")\n if net_id != \"\":\n roster_dict[net_id] = row\n\n # parse canvas\n with open(\"canvas.csv\") as f:\n canvas = [dict(row) for row in csv.DictReader(f)]\n\n # add students from canvas to roster\n ta_idx = 0\n for row in canvas:\n email = row.get(\"SIS Login ID\", \"\").lower()\n section = None\n m = re.search(r'LEC00(\\d)', row['Section'])\n if m:\n section = int(m.group(1))\n if email == \"\" or not email.endswith(\"@wisc.edu\"):\n continue\n net_id = email.split(\"@\")[0]\n\n if net_id in roster_dict:\n # update existing students\n roster_dict[net_id][\"name\"] = row[\"Student\"]\n roster_dict[net_id][\"section\"] = section\n roster_dict[net_id][\"enrolled\"] = True\n roster_dict.pop(net_id)\n else:\n # add new students\n ta = tas[ta_idx % len(tas)]\n roster.append({\"enrolled\": True, \"net_id\": net_id, \"section\": section, \"ta_name\": ta[\"name\"], \"ta_email\":ta[\"email\"]})\n ta_idx += 1\n\n # anybody left here was not in canvas\n for key in roster_dict:\n # drop students\n roster_dict[key][\"enrolled\"] = False\n\n # parse roster\n with open(\"roster.json\", \"w\") as f:\n json.dump(roster, f, indent=True, sort_keys=True)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "tools/canvas_to_roster.py", "file_name": "canvas_to_roster.py", "file_ext": "py", "file_size_in_byte": 1788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 33, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "338206584", "text": "from flask import Flask, jsonify, request\nimport generator_recsys\nimport tensorflow as tf\nimport data_loader_recsys\nimport utils\nimport argparse\nfrom tensorflow.contrib import learn\nimport json\n\napp = Flask(__name__)\nsess = tf.Session()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--datapath', type=str, default='Data/Session/')\nparser.add_argument(\"--dilated_channels\", type=int, default=100, help='number of dilated channels')\nparser.add_argument(\"--learning_rate\", type=float, default=0.008, help='learning rate')\nparser.add_argument(\"--kernel_size\", type=int, default=3, help=\"kernel size\")\nparser.add_argument(\"--batch_size\", type=int, default=300, help=\"batch size\")\nparser.add_argument(\"--max_seq_size\", type=int, default=80, help=\"max seq len\")\nargs = parser.parse_args()\nmodel_path = args.datapath + \"/\" + \"model.ckpt\"\nvocab_path = args.datapath + \"/\" + \"vocab.pickle\"\n\n\ndef load_model(n_items, path):\n model_params = {\n 'item_size': n_items,\n 'dilated_channels': args.dilated_channels,\n 'dilations': [1, 2, 1, 2, 1, 2, ],\n 'kernel_size': 
args.kernel_size,\n 'learning_rate': args.learning_rate,\n 'batch_size': args.batch_size,\n 'is_negsample': False\n }\n itemrec = generator_recsys.NextItNet_Decoder(model_params)\n itemrec.train_graph(model_params['is_negsample'])\n itemrec.predict_graph(model_params['is_negsample'], reuse=True)\n init = tf.global_variables_initializer()\n sess.run(init)\n saver = tf.train.Saver()\n saver.restore(sess, path)\n return itemrec\n\ndef get_dataset(path):\n vocab = learn.preprocessing.VocabularyProcessor.restore(path)\n item_dict = vocab.vocabulary_._mapping\n vocabulary = vocab.vocabulary_\n print(\"len item dict\")\n print(len(item_dict))\n return item_dict, vocabulary, vocab\n\nitem_dict = json.load(open(vocab_path, 'r'))\nmodel = load_model(len(item_dict)+1, model_path)\nvocabulary = json.load(open(vocab_path+\"inverted\", 'r'))\n\ndef pad_sequence(user_profile, max_seq_size):\n\n if max_seq_size > len(user_profile):\n dif = max_seq_size - len(user_profile)\n # fill gaps with UNK (0) interaction as suggested in docs\n pads = [0] * dif\n return pads + user_profile\n # longer sequences are not a problem according to the readme\n return user_profile\n\n\ndef prepare_sequence(user_profile, item_dict, max_seq_size):\n user_profile = [item_dict[str(i)] if str(i) in item_dict else -1 for i in user_profile]\n user_profile = pad_sequence(user_profile, max_seq_size)\n return [user_profile]\n\ndef recommend(model, user_profile, item_dict, vocabulary, max_seq_size, top_k=10):\n input_sequence = prepare_sequence(user_profile, item_dict, max_seq_size)\n print(\"original input\")\n print(user_profile)\n print(\"prepared input sequence\")\n print(input_sequence)\n [probs] = sess.run([model.g_probs], feed_dict={model.input_predict: input_sequence})\n if probs.shape[0] > 0:\n pred_items = utils.sample_top_k_with_scores(probs[0][-1], top_k=top_k)\n predictions = [(vocabulary[str(item_token)],score) if str(item_token) in vocabulary else (\"[UNK]\", score) for (item_token, score) in pred_items]\n print(\"predictions\")\n print(predictions)\n return predictions\n print(\"empty pred\")\n return []\n\n\n@app.route('/recommend', methods=['POST'])\ndef recommend_endpoint():\n request_data = request.json\n user_profile = request_data['user_profile']\n max_seq_size = args.max_seq_size\n probs = recommend(model, user_profile, item_dict, vocabulary, max_seq_size)\n json_serializable = [(i, str(score)) for i, score in probs]\n return jsonify({\"items\": json_serializable})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=True)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 3757, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "generator_recsys.NextItNet_Decoder", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.contrib.learn.preprocessing", "line_number": 45, 
"usage_type": "attribute"}, {"api_name": "tensorflow.contrib.learn", "line_number": 45, "usage_type": "name"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.sample_top_k_with_scores", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "44935306", "text": "# -*- coding:utf-8 -*-\nfrom random import randint\n\nimport requests\nfrom retrying import retry\n\n\nclass Headers:\n def __init__(self):\n self.windows = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3610.2 Safari/537.36\"}\n self.ios = {\n \"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1\"}\n self.android = {\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3610.2 Mobile Safari/537.36\"}\n self.mac = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3610.2 Safari/537.36\"}\n self.default = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\"}\n\n def get_headers(self, headers_type):\n headers_all = {\n 'default': self.default,\n 'windows': self.windows,\n 'mac': self.mac,\n 'android': self.android,\n 'ios': self.ios\n }\n if headers_type != 'random':\n headers = headers_all.get(headers_type)\n else:\n headers = list(headers_all.values())[randint(0, 4)]\n return headers\n\n\n@retry(stop_max_attempt_number=3)\ndef _parse_url(url_info, method, data, headers_type):\n headers = Headers().get_headers(headers_type)\n if method == 'POST':\n response = requests.post(url_info, data=data, headers=headers)\n else:\n response = requests.get(url_info, headers=headers, timeout=3)\n assert response.status_code == 200\n return response.content.decode()\n\n\ndef parse_url(url_info, method='GET', data=None, headers_type='default'):\n try:\n result = _parse_url(url_info, method, data, headers_type)\n except Exception:\n result = None\n\n return result\n\n\nif __name__ == '__main__':\n url = 'http://www.baidu.com'\n res = parse_url(url, headers_type='random')\n print(len(res))\n", "sub_path": "tools/build/lib/crawler/currency.py", "file_name": "currency.py", "file_ext": "py", "file_size_in_byte": 2191, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "9611072", "text": "from openerp import models, fields, api, _\nfrom openerp.osv import osv\nimport time\nfrom datetime import date\nfrom dateutil import relativedelta\nimport openerp.addons.decimal_precision as dp\n\nclass hr_loan(models.Model):\n _name = 'hr.loan'\n\n @api.multi\n def write(self, vals):\n loan_details_obj = self.env['hr.loan.detail']\n prec = 
self.env['decimal.precision'].precision_get('Account')\n id = super(hr_loan, self).write(vals)\n for loan in self:\n if loan.state == 'accepted' and not vals.get('state', False):\n value = {}\n base = round(loan.amount / loan.period, prec)\n benefit = round((loan.amount / loan.period) * loan.rate, prec)\n for i in range(loan.period):\n value = {\n 'name': loan.description + ' ' + str((date(*time.strptime(str(loan.start_date),'%Y-%m-%d')[:3]) + relativedelta.relativedelta(months=+i)).strftime('%Y-%m')),\n 'start_date': str((date(*time.strptime(str(loan.start_date),'%Y-%m-%d')[:3]) + relativedelta.relativedelta(months=+i)).strftime('%Y-%m-%d')),\n 'base': base,\n 'benefit': benefit,\n 'total': base + benefit,\n 'state': \"draft\",\n 'loan_id': loan.id,\n }\n loan_details_obj.create(value)\n return id\n\n @api.multi\n def accepted_loan(self):\n if not self.period or not self.start_date :\n raise osv.except_osv(_('Warning!'), _('You cannot accept a loan without defining the rate, the period and the start date !'))\n self.state = 'accepted'\n return True\n\n @api.multi\n def refused_loan(self):\n self.state = 'refused'\n for detail in self.detail_ids:\n detail.unlink()\n return True\n\n @api.multi\n def anticipate_loan(self):\n self.state = 'done'\n for detail in self.detail_ids:\n detail.state = 'done'\n return True\n\n @api.multi\n def simulate_loan(self):\n sum = 0\n sum1 = 0\n sum2 = 0\n prec = self.env['decimal.precision'].precision_get('Account')\n value = {}\n if self.period and self.start_date :\n for detail in self.detail_ids:\n detail.unlink()\n base = round(self.amount / self.period, prec)\n benefit = round((self.amount / self.period) * self.rate, prec)\n for i in range(self.period - 1):\n value = {\n 'name': self.description +' ' + str((date(*time.strptime(str(self.start_date),'%Y-%m-%d')[:3]) + relativedelta.relativedelta(months=+i)).strftime('%Y-%m')),\n 'start_date': str((date(*time.strptime(str(self.start_date),'%Y-%m-%d')[:3]) + relativedelta.relativedelta(months=+i)).strftime('%Y-%m-%d')),\n 'base': base,\n 'benefit': benefit,\n 'total': base + benefit,\n 'state': \"draft\",\n 'loan_id': self.id,\n }\n sum += base\n sum1 += benefit\n sum2 += base + benefit\n self.env['hr.loan.detail'].create(value)\n value = {\n 'name': self.description +' ' + str((date(*time.strptime(str(self.start_date),'%Y-%m-%d')[:3]) + relativedelta.relativedelta(months=+i +1)).strftime('%Y-%m')),\n 'start_date': str((date(*time.strptime(str(self.start_date),'%Y-%m-%d')[:3]) + relativedelta.relativedelta(months=+ i+ 1)).strftime('%Y-%m-%d')),\n 'base': self.amount-sum,\n 'benefit': round(self.amount * self.rate, prec)-sum1,\n 'total': (self.amount+(self.amount*self.rate))-sum2,\n 'state': \"draft\",\n 'loan_id': self.id,\n }\n self.env['hr.loan.detail'].create(value)\n return True\n\n @api.multi\n def unlink (self):\n for loan in self:\n if loan.state != 'draft':\n raise osv.except_osv(_('Warning!'),_('You cannot delete this loan !'))\n for detail in self.detail_ids:\n detail.unlink()\n return super(hr_loan, self).unlink()\n\n @api.multi\n def onchange_date(self, period, start_date):\n v = {}\n if not period:\n return {'value': v}\n if not start_date:\n return {'value': v}\n v['end_date'] = str(date(*time.strptime(str(start_date),'%Y-%m-%d')[:3]) + relativedelta.relativedelta(months=+period, day=1, days=0))[:10]\n return {'value': v}\n \n name = fields.Char('Number', size=64, readonly=True)\n description = fields.Char('Description', size=128)\n employee_id = fields.Many2one('hr.employee', 'Employee', 
required=True)\n loan_date = fields.Date('Approval Date', required=True, default=date.today())\n amount = fields.Float('Amount', digits_compute=dp.get_precision('Account'), required=True)\n period = fields.Integer('Period')\n start_date = fields.Date('Start Date')\n end_date = fields.Date('End Date')\n rate = fields.Float('Rate', digits_compute=dp.get_precision('Discount'), help=\"A rate of 0.08 is equal to 8 %\")\n rule_id = fields.Many2one('hr.salary.rule', 'Loan Type', required=True, domain=[('category_id.is_loan','=',True)])\n state = fields.Selection([('draft','Draft'),('accepted','Accepted'),('refused','Refused'),('done','Done')], 'State', readonly=True, default='draft')\n detail_ids = fields.One2many('hr.loan.detail','loan_id', 'Details')\n\n @api.model\n def create(self, vals):\n vals['name'] = self.env['ir.sequence'].get('hr.loan') or '/'\n return super(hr_loan, self).create(vals)\n\nclass hr_loan_detail(models.Model):\n _name= 'hr.loan.detail'\n _order = 'loan_id, state'\n\n name = fields.Char('Description',size=128)\n loan_id = fields.Many2one('hr.loan',string='Loan', ondelete='cascade')\n start_date = fields.Date('Date')\n base = fields.Float('Base', digits_compute=dp.get_precision('Account'))\n benefit = fields.Float('Benefit', digits_compute=dp.get_precision('Account'))\n total = fields.Float('Total', digits_compute=dp.get_precision('Account'))\n state = fields.Selection([('draft','Draft'),('done','Done')], 'State')\n state_rel = fields.Selection(related='loan_id.state', selection=[('draft', 'Draft'), ('accepted', 'Accepted'), ('refused', 'Refused'), ('done', 'Done')], string=\"State\")\n\n @api.multi\n def anticipate(self):\n self.state = 'done'\n for loan in self:\n loan_id = loan.loan_id.id\n loan_ids = self.search([('loan_id','=',loan_id),('state','=','draft')])\n if not loan_ids:\n self.env['hr.loan'].write([loan_id], {'state':'done'})\n \n return True\n\n @api.multi\n def unlink(self):\n for loan in self:\n if loan.state == 'done' :\n raise osv.except_osv(_('Warning!'),_('You cannot delete a Loan which is not draft !'))\n return super(hr_loan_detail, self).unlink()\n\n @api.multi\n def write(self, data):\n for loan in self:\n if data.get('start_date', loan.start_date) < loan.start_date:\n raise osv.except_osv(_('Warning!'),_('You cannot anticipate a Loan with anterior date !'))\n return super(hr_loan_detail, self).write(data)\n\nclass hr_salary_rule_category(models.Model):\n _inherit = 'hr.salary.rule.category'\n\n is_loan = fields.Boolean('Loan')\n\nclass hr_payslip(models.Model):\n _inherit = 'hr.payslip'\n\n def get_inputs(self, cr, uid, contract_ids, date_from, date_to, payslip_run_id, context=None):\n res = super(hr_payslip, self).get_inputs(cr, uid, contract_ids, date_from, date_to, payslip_run_id, context=context)\n\n loan_obj = self.pool.get('hr.loan') \n loan_detail_obj = self.pool.get('hr.loan.detail') \n contract_obj = self.pool.get('hr.contract')\n\n for contract in contract_obj.browse(cr, uid, contract_ids, context=context):\n loan_id = loan_obj.search(cr, uid, [('employee_id', '=', contract.employee_id.id),('state', '=', 'accepted')], context=context)\n if loan_id:\n for rule in loan_obj.browse(cr, uid, loan_id, context=context):\n for loan in rule.detail_ids:\n if loan.state == 'draft' and loan.start_date<=date_to:\n inputs = {\n 'name': rule.rule_id.name,\n 'code': rule.rule_id.code,\n 'amount': loan.total,\n 'contract_id': contract.id,\n }\n res += [inputs]\n return res\n\nclass hr_payslip_run(models.Model):\n _inherit = 'hr.payslip.run'\n \n def 
close_payslip_run(self, cr, uid, ids, context=None): \n loan_obj = self.pool.get('hr.loan')\n loan_detail_obj = self.pool.get('hr.loan.detail')\n for payslip in self.browse(cr, uid, ids, context=context).slip_ids:\n date_start = payslip.date_from\n date_end = payslip.date_to\n employee = payslip.employee_id\n loan_id = loan_obj.search(cr, uid, [('employee_id', '=', employee.id), ('state', '=', 'accepted')], context=context)\n loan_ids = loan_detail_obj.search(cr, uid, [('loan_id', '=',loan_id ),('start_date','>',date_start),('start_date','<',date_end)], context=context)\n loan_detail_obj.write(cr, uid, loan_ids, {'state':'done'})\n return super(hr_payslip_run, self).close_payslip_run(cr, uid, ids, context=context)\n\n \n \n\n\n \n", "sub_path": "prooaddons/hr_loan/hr_loan.py", "file_name": "hr_loan.py", "file_ext": "py", "file_size_in_byte": 9830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "openerp.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 8, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 23, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 23, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "dateutil.relativedelta", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 24, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 24, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "dateutil.relativedelta", "line_number": 24, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 11, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 11, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 37, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 37, "usage_type": "name"}, {"api_name": "openerp._", "line_number": 37, "usage_type": "call"}, {"api_name": "openerp.api.multi", "line_number": 34, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 34, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 41, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 41, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 48, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 48, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 69, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 69, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 69, "usage_type": "call"}, {"api_name": "dateutil.relativedelta", "line_number": 69, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 70, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 70, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 70, "usage_type": "call"}, {"api_name": "dateutil.relativedelta", "line_number": 70, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 82, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 82, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 82, "usage_type": "call"}, {"api_name": "dateutil.relativedelta", "line_number": 82, "usage_type": 
"name"}, {"api_name": "datetime.date", "line_number": 83, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 83, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 83, "usage_type": "call"}, {"api_name": "dateutil.relativedelta", "line_number": 83, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 55, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 55, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 97, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 97, "usage_type": "name"}, {"api_name": "openerp._", "line_number": 97, "usage_type": "call"}, {"api_name": "openerp.api.multi", "line_number": 93, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 93, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 109, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 109, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 109, "usage_type": "call"}, {"api_name": "dateutil.relativedelta", "line_number": 109, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 102, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 102, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 112, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 112, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 113, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 113, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 114, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 114, "usage_type": "name"}, {"api_name": "openerp.fields.Date", "line_number": 115, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 115, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 115, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 116, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 116, "usage_type": "name"}, {"api_name": "openerp.addons.decimal_precision.get_precision", "line_number": 116, "usage_type": "call"}, {"api_name": "openerp.addons.decimal_precision", "line_number": 116, "usage_type": "name"}, {"api_name": "openerp.fields.Integer", "line_number": 117, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 117, "usage_type": "name"}, {"api_name": "openerp.fields.Date", "line_number": 118, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 118, "usage_type": "name"}, {"api_name": "openerp.fields.Date", "line_number": 119, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 119, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 120, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 120, "usage_type": "name"}, {"api_name": "openerp.addons.decimal_precision.get_precision", "line_number": 120, "usage_type": "call"}, {"api_name": "openerp.addons.decimal_precision", "line_number": 120, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 121, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 121, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 122, "usage_type": "call"}, {"api_name": "openerp.fields", 
"line_number": 122, "usage_type": "name"}, {"api_name": "openerp.fields.One2many", "line_number": 123, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 123, "usage_type": "name"}, {"api_name": "openerp.api.model", "line_number": 125, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 125, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 130, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 130, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 134, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 134, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 135, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 135, "usage_type": "name"}, {"api_name": "openerp.fields.Date", "line_number": 136, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 136, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 137, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 137, "usage_type": "name"}, {"api_name": "openerp.addons.decimal_precision.get_precision", "line_number": 137, "usage_type": "call"}, {"api_name": "openerp.addons.decimal_precision", "line_number": 137, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 138, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 138, "usage_type": "name"}, {"api_name": "openerp.addons.decimal_precision.get_precision", "line_number": 138, "usage_type": "call"}, {"api_name": "openerp.addons.decimal_precision", "line_number": 138, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 139, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 139, "usage_type": "name"}, {"api_name": "openerp.addons.decimal_precision.get_precision", "line_number": 139, "usage_type": "call"}, {"api_name": "openerp.addons.decimal_precision", "line_number": 139, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 140, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 140, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 141, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 141, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 143, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 143, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 158, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 158, "usage_type": "name"}, {"api_name": "openerp._", "line_number": 158, "usage_type": "call"}, {"api_name": "openerp.api.multi", "line_number": 154, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 154, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 165, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 165, "usage_type": "name"}, {"api_name": "openerp._", "line_number": 165, "usage_type": "call"}, {"api_name": "openerp.api.multi", "line_number": 161, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 161, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 168, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 168, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 171, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 
171, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 173, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 173, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 198, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 198, "usage_type": "name"}]} +{"seq_id": "586793778", "text": "import time\nimport re\n\nfrom django.http import HttpResponseRedirect\nfrom django.utils.http import cookie_date\nfrom django.conf import settings\n\nfrom browsecap.browser import is_mobile\n\n# default cookie expire time is one month\nDEFAULT_COOKIE_MAX_AGE = 3600*24*31\n\nclass MobileRedirectMiddleware(object):\n def process_request(self, request):\n if not getattr(settings, 'MOBILE_DOMAIN', False):\n return\n\n # Cookie settings\n max_age = getattr(settings, 'MOBILE_COOKIE_MAX_AGE', DEFAULT_COOKIE_MAX_AGE)\n expires_time = time.time() + max_age\n expires = cookie_date(expires_time)\n\n # test for browser return\n if (\n # is mobile?\n is_mobile(request.META.get('HTTP_USER_AGENT', ''))\n and\n # but has param m2w?\n request.GET.get('m2w', False)\n and\n # does currently not have a is browser cookie with 1\n request.COOKIES.get('isbrowser', '0') == '0'\n ):\n ''' Set a cookie for Mobile 2 Web if a mobile browser does not want to browse mobile '''\n response = HttpResponseRedirect(request.META.get('PATH_INFO', '/'))\n response.set_cookie('ismobile', '0', domain=settings.SESSION_COOKIE_DOMAIN, max_age=max_age, expires=expires)\n response.set_cookie('isbrowser', '1', domain=settings.SESSION_COOKIE_DOMAIN, max_age=max_age, expires=expires)\n return response\n\n # test for mobile browser\n if (\n # check for override cookie, do not check if present\n request.COOKIES.get('ismobile', '0') == '1' or (\n # browser info present\n 'HTTP_USER_AGENT' in request.META\n and\n # desktop browser override not set\n request.COOKIES.get('isbrowser', '0') != '1'\n and\n # check browser type\n is_mobile(request.META.get('HTTP_USER_AGENT', ''))\n and\n # check whether ipad should be redirected\n self.redirect_ipad(request.META.get('HTTP_USER_AGENT', ''))\n )\n ):\n redirect = settings.MOBILE_DOMAIN\n if getattr(settings, 'MOBILE_REDIRECT_PRESERVE_URL', False):\n redirect = redirect.rstrip('/') + request.path_info\n\n # redirect to mobile domain\n response = HttpResponseRedirect(redirect)\n response.set_cookie('ismobile', '1', domain=settings.SESSION_COOKIE_DOMAIN, max_age=max_age, expires=expires)\n return response\n\n\n def redirect_ipad(self, user_agent):\n if not getattr(settings, 'BROWSECAP_REDIRECT_IPAD', False):\n match = re.search('iPad', user_agent, re.I)\n if match:\n return False\n return True\n", "sub_path": "browsecap/middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 2920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.settings", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "django.utils.http.cookie_date", "line_number": 21, "usage_type": "call"}, {"api_name": "browsecap.browser.is_mobile", "line_number": 26, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.settings.SESSION_COOKIE_DOMAIN", "line_number": 36, "usage_type": "attribute"}, {"api_name": 
"django.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.settings.SESSION_COOKIE_DOMAIN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 37, "usage_type": "name"}, {"api_name": "browsecap.browser.is_mobile", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.settings.MOBILE_DOMAIN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 57, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 58, "usage_type": "argument"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 62, "usage_type": "call"}, {"api_name": "django.conf.settings.SESSION_COOKIE_DOMAIN", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 63, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 68, "usage_type": "argument"}, {"api_name": "re.search", "line_number": 69, "usage_type": "call"}, {"api_name": "re.I", "line_number": 69, "usage_type": "attribute"}]} +{"seq_id": "626808883", "text": "from __future__ import absolute_import, unicode_literals\nfrom django import template\n\nregister = template.Library()\n\n\n@register.inclusion_tag(\"profile_item.html\")\ndef show_profile(user):\n return {\"user\": user}\n\n\n@register.simple_tag\ndef clear_search_url(request):\n getvars = request.GET.copy()\n if \"search\" in getvars:\n del getvars[\"search\"]\n if len(list(getvars.keys())) > 0:\n return \"{0}?{1}\".format(request.path, getvars.urlencode())\n else:\n return request.path\n", "sub_path": "pinax/apps/profiles/templatetags/profile_tags.py", "file_name": "profile_tags.py", "file_ext": "py", "file_size_in_byte": 501, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.template.Library", "line_number": 4, "usage_type": "call"}, {"api_name": "django.template", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "608555607", "text": "from syft.frameworks.torch.mpc.primitives import PrimitiveStorage\nfrom syft.generic.utils import remote, allow_command\n\nimport torch\nimport syft\n\n\nclass PRZS:\n def __init__(self):\n self.generators = {}\n\n ring_size = 2 ** 32\n\n @classmethod\n def setup(cls, players):\n seed_map = cls.generate_and_share_seeds(players)\n for worker, seeds in seed_map.items():\n if worker == syft.local_worker:\n initialize_generators = _initialize_generators\n else:\n initialize_generators = remote(_initialize_generators, location=worker)\n initialize_generators(*seeds)\n\n @classmethod\n def generate_and_share_seeds(cls, players):\n \"\"\"\n Returns: dict {player i: seed i, seed i+1}\n \"\"\"\n local_seeds = []\n remote_seeds = []\n number_of_players = len(players)\n for i in range(number_of_players):\n if players[i] == syft.local_worker:\n local_seed = players[i].torch.randint(high=cls.ring_size, size=[1])\n remote_seed = local_seed.send(players[(i + 1) % number_of_players])\n else:\n local_seed = players[i].remote.torch.randint(high=cls.ring_size, size=[1])\n remote_seed = local_seed.copy().move(players[(i + 1) % number_of_players])\n local_seeds.append(local_seed)\n remote_seeds.append(remote_seed)\n return {\n players[i]: (local_seeds[i], remote_seeds[(i - 1) % number_of_players])\n for i in range(number_of_players)\n }\n\n @property\n def generators(self):\n return self.__generators\n\n @generators.setter\n def generators(self, generators):\n self.__generators = 
generators\n\n\nRING_SIZE = 2 ** 32\nERR_MSG = \"You must call PRZS.setup because the seeds were not shared between workers\"\n\n\n@allow_command\ndef _initialize_generators(cur_seed, prev_seed):\n worker = cur_seed.owner\n cur_generator = torch.Generator()\n prev_generator = torch.Generator()\n\n cur_generator.manual_seed(cur_seed.item())\n prev_generator.manual_seed(prev_seed.item())\n\n worker.crypto_store.przs.generators = {\"cur\": cur_generator, \"prev\": prev_generator}\n\n\ndef get_random(name_generator, shape, worker):\n if worker == syft.local_worker:\n func = _get_random_tensor\n else:\n func = remote(_get_random_tensor, location=worker)\n\n return func(name_generator, shape, worker.id)\n\n\n@allow_command\ndef _get_random_tensor(name_generator, shape, worker_id, ring_size=RING_SIZE):\n worker = syft.local_worker.get_worker(worker_id)\n assert worker.crypto_store.przs.generators, ERR_MSG\n\n generators = worker.crypto_store.przs.generators\n\n gen = generators[name_generator]\n rand_elem = torch.randint(high=ring_size, size=shape, generator=gen, dtype=torch.long)\n return rand_elem\n\n\ndef gen_alpha_3of3(worker, ring_size=RING_SIZE):\n if worker == syft.local_worker:\n func = _generate_alpha_3of3\n else:\n func = remote(_generate_alpha_3of3, location=worker)\n\n return func(worker.id, ring_size)\n\n\ndef gen_alpha_2of3(worker, ring_size=RING_SIZE):\n if worker == syft.local_worker:\n func = _generate_alpha_2of3\n else:\n func = remote(_generate_alpha_2of3, location=worker)\n\n return func(worker.id, ring_size)\n\n\n@allow_command\ndef _generate_alpha_3of3(worker_id, ring_size=RING_SIZE):\n \"\"\"\n Generate a random number (alpha) using the two generators\n * generator cur - represents a generator initialized with this worker (i) seed\n * generator prev - represents a generator initialized with\n the previous worker (i-1) seed\n \"\"\"\n worker = syft.local_worker.get_worker(worker_id)\n assert worker.crypto_store.przs.generators, ERR_MSG\n\n generators = worker.crypto_store.przs.generators\n\n cur_gen = generators[\"cur\"]\n prev_gen = generators[\"prev\"]\n\n alpha = __get_next_elem(cur_gen, ring_size) - __get_next_elem(prev_gen, ring_size)\n return alpha\n\n\n@allow_command\ndef _generate_alpha_2of3(worker_id, ring_size=RING_SIZE):\n \"\"\"\n Generate 2 random numbers (alpha_i, alpha_i-1) using the two generators\n * generator cur - represents a generator initialized with this worker (i) seed\n and it generates alpha_i\n * generator prev - represents a generator initialized with\n the previous worker (i-1) seed and it generates alpha_i-1\n \"\"\"\n worker = syft.local_worker.get_worker(worker_id)\n assert worker.crypto_store.przs.generators, ERR_MSG\n\n generators = worker.crypto_store.przs.generators\n\n cur_gen = generators[\"cur\"]\n prev_gen = generators[\"prev\"]\n\n alpha_cur, alpha_prev = (\n __get_next_elem(cur_gen, ring_size),\n __get_next_elem(prev_gen, ring_size),\n )\n return torch.tensor(alpha_cur.item()), torch.tensor(alpha_prev.item())\n\n\ndef __get_next_elem(generator, ring_size=RING_SIZE, shape=(1,)):\n tensor = torch.empty(shape, dtype=torch.long)\n return tensor.random_(0, ring_size, generator=generator)\n\n\nPrimitiveStorage.register_component(\"przs\", PRZS)\n", "sub_path": "syft/frameworks/torch/mpc/przs.py", "file_name": "przs.py", "file_ext": "py", "file_size_in_byte": 5143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "syft.local_worker", 
"line_number": 18, "usage_type": "attribute"}, {"api_name": "syft.generic.utils.remote", "line_number": 21, "usage_type": "call"}, {"api_name": "syft.local_worker", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.Generator", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.Generator", "line_number": 63, "usage_type": "call"}, {"api_name": "syft.generic.utils.allow_command", "line_number": 59, "usage_type": "name"}, {"api_name": "syft.local_worker", "line_number": 72, "usage_type": "attribute"}, {"api_name": "syft.generic.utils.remote", "line_number": 75, "usage_type": "call"}, {"api_name": "syft.local_worker.get_worker", "line_number": 82, "usage_type": "call"}, {"api_name": "syft.local_worker", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.randint", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 88, "usage_type": "attribute"}, {"api_name": "syft.generic.utils.allow_command", "line_number": 80, "usage_type": "name"}, {"api_name": "syft.local_worker", "line_number": 93, "usage_type": "attribute"}, {"api_name": "syft.generic.utils.remote", "line_number": 96, "usage_type": "call"}, {"api_name": "syft.local_worker", "line_number": 102, "usage_type": "attribute"}, {"api_name": "syft.generic.utils.remote", "line_number": 105, "usage_type": "call"}, {"api_name": "syft.local_worker.get_worker", "line_number": 118, "usage_type": "call"}, {"api_name": "syft.local_worker", "line_number": 118, "usage_type": "attribute"}, {"api_name": "syft.generic.utils.allow_command", "line_number": 110, "usage_type": "name"}, {"api_name": "syft.local_worker.get_worker", "line_number": 139, "usage_type": "call"}, {"api_name": "syft.local_worker", "line_number": 139, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 151, "usage_type": "call"}, {"api_name": "syft.generic.utils.allow_command", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 155, "usage_type": "attribute"}, {"api_name": "syft.frameworks.torch.mpc.primitives.PrimitiveStorage.register_component", "line_number": 159, "usage_type": "call"}, {"api_name": "syft.frameworks.torch.mpc.primitives.PrimitiveStorage", "line_number": 159, "usage_type": "name"}]} +{"seq_id": "485962403", "text": "import os\nfrom django.test import SimpleTestCase\nfrom django.test.utils import override_settings\nfrom corehq.apps.app_manager.tests import TestFileMixin\nfrom corehq.apps.userreports.models import CustomDataSourceConfiguration\n\n\nclass TestCustomDataSource(SimpleTestCase, TestFileMixin):\n\n file_path = ('data', 'custom_data_sources')\n root = os.path.dirname(__file__)\n\n def test_wrap(self):\n wrapped = CustomDataSourceConfiguration.wrap(self.get_json('sample_custom_data_source'))\n self.assertEqual([\"example\", \"dimagi\"], wrapped.domains)\n\n def test_get_all(self):\n with override_settings(CUSTOM_DATA_SOURCES=[self.get_path('sample_custom_data_source', 'json')]):\n all = list(CustomDataSourceConfiguration.all())\n self.assertEqual(2, len(all))\n example, dimagi = all\n self.assertEqual('example', example.domain)\n self.assertEqual('dimagi', dimagi.domain)\n for config in all:\n self.assertEqual('all_candidates', config.table_id)\n\n def test_production_config(self):\n for data_source in CustomDataSourceConfiguration.all():\n data_source.validate()\n", "sub_path": "corehq/apps/userreports/tests/test_custom_data_sources.py", "file_name": 
"test_custom_data_sources.py", "file_ext": "py", "file_size_in_byte": 1179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.SimpleTestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "corehq.apps.app_manager.tests.TestFileMixin", "line_number": 8, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "corehq.apps.userreports.models.CustomDataSourceConfiguration.wrap", "line_number": 14, "usage_type": "call"}, {"api_name": "corehq.apps.userreports.models.CustomDataSourceConfiguration", "line_number": 14, "usage_type": "name"}, {"api_name": "django.test.utils.override_settings", "line_number": 18, "usage_type": "call"}, {"api_name": "corehq.apps.userreports.models.CustomDataSourceConfiguration.all", "line_number": 19, "usage_type": "call"}, {"api_name": "corehq.apps.userreports.models.CustomDataSourceConfiguration", "line_number": 19, "usage_type": "name"}, {"api_name": "corehq.apps.userreports.models.CustomDataSourceConfiguration.all", "line_number": 28, "usage_type": "call"}, {"api_name": "corehq.apps.userreports.models.CustomDataSourceConfiguration", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "302536094", "text": "import pymysql\nimport time\nfrom pymysql.cursors import DictCursor\nfrom pybase.apollo_setting import get_project_settings\n\nsettings = get_project_settings()\n\nMYSQL_HOST = settings.get(\"MYSQL_HOST\")\nMYSQL_USER = settings.get(\"MYSQL_USER\")\nMYSQL_PASSWORD = settings.get(\"MYSQL_PASSWORD\")\nMYSQL_DATABASE = settings.get(\"MYSQL_DATABASE\")\nMYSQL_TABLE = settings.get(\"MYSQL_TABLE\")\n# MYSQL_TABLE = settings.get(\"MYSQL_TABLE_TEST\")\n\n# MYSQL_HOST = '192.168.0.39'\n# MYSQL_USER = 'root'\n# MYSQL_PASSWORD = 'root'\n# MYSQL_DATABASE = 'js'\n# # MYSQL_TABLE = settings.get(\"MYSQL_TABLE\")\n# MYSQL_TABLE = 'd44_data_new'\n\n\ndef connet():\n db = pymysql.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE)\n\n cursor = db.cursor(DictCursor)\n\n return db, cursor\n\n\ndef get_cookie():\n db = pymysql.connect('192.168.0.11', 'root', '123456', 'wgf_catalog')\n cursor = db.cursor(DictCursor)\n sql = \"select * from robo_cookie_chf\"\n cursor.execute(sql)\n while True:\n user_pwd = cursor.fetchone()\n if not user_pwd['state']:\n break\n cursor.close()\n db.close()\n return user_pwd\n\n\ndef change_state(cookie):\n db = pymysql.connect('192.168.0.11', 'root', '123456', 'wgf_catalog')\n cursor = db.cursor(DictCursor)\n try:\n sql2 = \"UPDATE robo_cookie_chf SET state = 1 WHERE usr = '%s'\" % (cookie['usr'])\n cursor.execute(sql2)\n db.commit()\n except:\n db.rollback()\n\n cursor.close()\n db.close()\n\n\ndef restore(cookie):\n db = pymysql.connect('192.168.0.11', 'root', '123456', 'wgf_catalog')\n cursor = db.cursor(DictCursor)\n last_time = time.time()\n try:\n sql2 = \"UPDATE robo_cookie_chf SET state = 0 WHERE usr = '%s' and last_time = '%s'\" % (cookie['usr'], last_time)\n cursor.execute(sql2)\n db.commit()\n except:\n db.rollback()\n\n cursor.close()\n db.close()\n\n\n# 检查数据库是否有表,没有就建表\ndef check_table():\n db, cursor = connet()\n sql = \"show tables\"\n cursor.execute(sql)\n table_list = [item[key] for item in cursor.fetchall() for key in item]\n if MYSQL_TABLE not in table_list:\n create = \"CREATE TABLE %s(menu_id VARCHAR(255) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,menu_name VARCHAR(255) CHARACTER SET utf8 
COLLATE utf8_general_ci NOT NULL, parent_menu_id VARCHAR(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL, isRep VARCHAR(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL, PRIMARY KEY (menu_id))\" % (\n MYSQL_TABLE)\n cursor.execute(create)\n db.close()\n # return table_list\n\n\n# 获取该节点的数量\ndef select_count(parent_id):\n db, cursor = connet()\n\n sql = \"SELECT * FROM %s where parent_menu_id = '%s'\" % (MYSQL_TABLE, parent_id)\n # print(sql)\n count = cursor.execute(sql)\n # print(count)\n cursor.close()\n db.close()\n\n return count\n\n\n# 判断是否有重复\ndef select(menu_name, parent_menu_id, isRep):\n db, cursor = connet()\n sql = \"SELECT * FROM %s where menu_name = '%s' and parent_menu_id = '%s' and isRep = '%s'\" % (\n MYSQL_TABLE, menu_name, parent_menu_id, isRep)\n # print(sql)\n count = cursor.execute(sql)\n a = cursor.fetchone()\n cursor.close()\n db.close()\n return a\n\n\ndef insert(menu_id, menu_name, parent_id, isRep):\n db, cursor = connet()\n try:\n # 这种方式可解决验证问题\n sql = \"insert into %s(menu_id, menu_name, parent_menu_id, isRep) values('%s','%s','%s','%s')\" % (\n MYSQL_TABLE, menu_id, menu_name, parent_id, isRep)\n # print(sql)\n info = cursor.execute(sql)\n # print(info)\n db.commit()\n cursor.close()\n db.close()\n return info\n except Exception as e:\n # print(e)\n db.rollback()\n\n\nif __name__ == '__main__':\n a = get_cookie()\n print(a)\n change_state(a)\n", "sub_path": "data_spider/Scrapy_E_robodata_V1_01_pass/build/lib/Scrapy_E_robodata_V1_01/mysqlAPI.py", "file_name": "mysqlAPI.py", "file_ext": "py", "file_size_in_byte": 3874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pybase.apollo_setting.get_project_settings", "line_number": 6, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "pymysql.cursors.DictCursor", "line_number": 26, "usage_type": "argument"}, {"api_name": "pymysql.connect", "line_number": 32, "usage_type": "call"}, {"api_name": "pymysql.cursors.DictCursor", "line_number": 33, "usage_type": "argument"}, {"api_name": "pymysql.connect", "line_number": 46, "usage_type": "call"}, {"api_name": "pymysql.cursors.DictCursor", "line_number": 47, "usage_type": "argument"}, {"api_name": "pymysql.connect", "line_number": 60, "usage_type": "call"}, {"api_name": "pymysql.cursors.DictCursor", "line_number": 61, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "568777284", "text": "\"\"\" Unittest of spUtils/mayaUtils/__init__.py\n\"\"\"\nimport unittest2\n\nfrom spUtils import detectMaya\n\nclass Test_init(unittest2.TestCase):\n\n @staticmethod\n def __test_maya():\n return detectMaya.maya() or detectMaya.mayaBatch()\n\n # skipped if __test_maya is False\n @unittest2.skipIf(\n __test_maya,\n 'Not under maya.'\n )\n def test_decorator(self):\n from spUtils.mayaUtils.waitCursor import WaitCursor\n\n self.assertRaises(TypeError, WaitCursor, None)\n\n instance = WaitCursor()\n wrapper = instance(fake_function)\n\n self.assertIsNot(wrapper, fake_function)\n self.assertEqual(wrapper.__doc__, fake_function.__doc__)\n self.assertEqual(wrapper.__name__, fake_function.__name__)\n self.assertEqual(wrapper.__dict__, fake_function.__dict__)\n\n\n\n # skipped if __test_maya returns True\n @unittest2.skipUnless(\n __test_maya,\n 'Maya Friendly Context: won\\'t raise the error.'\n )\n def test_import_raise(self):\n self.assertRaises(EnvironmentError, 
self.import_)\n\n\n def import_(self):\n import spUtils.mayaUtils as mayaUtils\n return mayaUtils\n\n\nif __name__ == '__main__':\n unittest2.main()\n", "sub_path": "misc/spUtils/test/test_mayaUtils_waitCursor.py", "file_name": "test_mayaUtils_waitCursor.py", "file_ext": "py", "file_size_in_byte": 1216, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest2.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "spUtils.detectMaya.maya", "line_number": 11, "usage_type": "call"}, {"api_name": "spUtils.detectMaya", "line_number": 11, "usage_type": "name"}, {"api_name": "spUtils.detectMaya.mayaBatch", "line_number": 11, "usage_type": "call"}, {"api_name": "spUtils.mayaUtils.waitCursor.WaitCursor", "line_number": 21, "usage_type": "name"}, {"api_name": "spUtils.mayaUtils.waitCursor.WaitCursor", "line_number": 23, "usage_type": "call"}, {"api_name": "unittest2.skipIf", "line_number": 14, "usage_type": "call"}, {"api_name": "unittest2.skipUnless", "line_number": 34, "usage_type": "call"}, {"api_name": "spUtils.mayaUtils", "line_number": 44, "usage_type": "name"}, {"api_name": "unittest2.main", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "500541506", "text": "from django.contrib.auth.views import SuccessURLAllowedHostsMixin\nfrom django.utils.http import is_safe_url\n\n\nclass CssClassesFormMixin(object):\n error_css_class = 'is-invalid'\n success_css_class = 'is-valid'\n\n def __init__(self, *args, **kwargs):\n super(CssClassesFormMixin, self).__init__(*args, **kwargs)\n if self.is_bound:\n for f in self:\n class_attr = f.field.widget.attrs.get('class', '')\n class_attr += ' %s' % (self.error_css_class if f.errors else self.success_css_class)\n f.field.widget.attrs['class'] = class_attr\n\n\nclass NextRedirectMixin(SuccessURLAllowedHostsMixin):\n\n def get_safe_url(self, url):\n return url if is_safe_url(\n url,\n self.get_success_url_allowed_hosts(),\n require_https=self.request.is_secure(),\n ) else None\n\n def get(self, request, *args, **kwargs):\n if 'next' in request.GET:\n request.session['next'] = self.get_safe_url(request.GET['next'])\n return super(NextRedirectMixin, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(NextRedirectMixin, self).get_context_data(**kwargs)\n if 'next' in self.request.GET:\n context['next'] = self.get_safe_url(self.request.GET['next'])\n return context\n\n def get_success_url(self):\n if 'next' in self.request.POST:\n return self.get_safe_url(self.request.POST['next'])\n if 'next' in self.request.session:\n return self.get_safe_url(self.request.session['next'])\n return super(NextRedirectMixin, self).get_success_url()\n", "sub_path": "richardcornish/utils/mixins.py", "file_name": "mixins.py", "file_ext": "py", "file_size_in_byte": 1666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.contrib.auth.views.SuccessURLAllowedHostsMixin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.utils.http.is_safe_url", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "324238557", "text": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.core.cache import cache\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.text import slugify\n\n\nclass Hit(models.Model):\n created = models.DateField(auto_now_add=True, db_index=True)\n 
updated = models.DateField(auto_now=True, db_index=True)\n hits = models.PositiveIntegerField(default=0)\n content_type = models.ForeignKey(ContentType)\n object_pk = models.CharField(max_length=256)\n\n class Meta:\n unique_together = ('content_type', 'object_pk')\n\n\nclass HitCounterModelMixin(object):\n\n @property\n def hits(self):\n content_type = ContentType.objects.get_for_model(self.__class__,\n for_concrete_model=False)\n try:\n hit = Hit.objects.get(content_type=content_type,\n object_pk=self.pk)\n except Hit.DoesNotExist:\n return 0\n\n return hit.hits\n\n def hit(self, request=None):\n content_type = ContentType.objects.get_for_model(self.__class__)\n\n # Here we cache the user's IP to ensure that the same\n # IP won't hit the same page again for a while\n if request:\n ip_addr = request.META.get('REMOTE_ADDR')\n cache_key = u'page_hits-{}-{}-{}'.format(ip_addr,\n content_type, self.pk)\n cache_key = slugify(cache_key)\n\n duplicate = cache.get(cache_key)\n if duplicate:\n return\n cache.set(cache_key, True)\n\n # Everything ok, so just increment the page count\n hit_pk = Hit.objects.get_or_create(content_type=content_type,\n object_pk=self.pk)[0].pk\n\n # Using this way instead of hits += 1 forces django to\n # call the UPDATE directly in the database avoiding\n # concurrency problems\n Hit.objects.filter(pk=hit_pk).update(hits=models.F(\"hits\") + 1)\n\n", "sub_path": "hitcounter/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2088, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 36, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 44, "usage_type": "call"}, {"api_name": "django.core.cache.cache.get", "line_number": 46, "usage_type": 
"call"}, {"api_name": "django.core.cache.cache", "line_number": 46, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 49, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.F", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "196351162", "text": "\"\"\"\nThe sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n\nFind the sum of all the primes below two million.\n\"\"\"\n# Use sieve of eratastothenes?\nmax_number = 2000000\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nstatus_list = [True for x in range(2, max_number + 1)]\nlogging.debug(status_list)\n\nfor test_number in range(2, max_number + 1):\n for flipper in range(test_number * 2, max_number + 1, test_number):\n status_list[flipper-2] = False\n\nlogging.debug(status_list)\n\nprimes = [x for x in range(2, max_number + 1) if status_list[x-2] == True]\n\nlogging.info(primes)\n\ntotal = 0\n\nfor n in primes:\n total += n\n\nlogging.info(\"answer is {}\".format(total))", "sub_path": "euler10.py", "file_name": "euler10.py", "file_ext": "py", "file_size_in_byte": 711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "38145652", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import TemplateView\n\nfrom help_bot.statistic import save_site_statistic\nfrom help_bot.web_chat_logic import chat_req_get\n\n\nclass MainPage(TemplateView):\n template_name = 'help_bot/main_page.html'\n\n def get(self, request, *args, **kwargs):\n save_site_statistic()\n return render(request, template_name=self.template_name)\n\n\nclass WebChatBot(TemplateView):\n \"\"\" Web chat bot pop-up. All Ajax requests come here. 
\"\"\"\n\n def get(self, request, *args, **kwargs):\n save_site_statistic()\n return HttpResponse(chat_req_get(request))\n\n\n@xframe_options_exempt\ndef web_chat(request):\n \"\"\" iframe window/widget \"\"\"\n if request.method == \"GET\":\n save_site_statistic()\n return render(request, template_name='help_bot/chat.html')\n", "sub_path": "ButonBotDjango/help_bot/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 943, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 10, "usage_type": "name"}, {"api_name": "help_bot.statistic.save_site_statistic", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 18, "usage_type": "name"}, {"api_name": "help_bot.statistic.save_site_statistic", "line_number": 22, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 23, "usage_type": "call"}, {"api_name": "help_bot.web_chat_logic.chat_req_get", "line_number": 23, "usage_type": "call"}, {"api_name": "help_bot.statistic.save_site_statistic", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.views.decorators.clickjacking.xframe_options_exempt", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "395394981", "text": "from django.conf.urls import patterns, url\nfrom django.contrib.auth.views import login, logout\n\nfrom portal import views\n\nurlpatterns = patterns('',\n (r'^$', views.index),\n (r'^ebridge/$', views.ebridge),\n (r'^orders/$', views.orders),\n)\n\nurlpatterns += patterns('',\n (r'^accounts/login/$', login),\n url(r'^accounts/logout/$', logout, kwargs={'next_page':'/'}),\n)", "sub_path": "portal/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 6, "usage_type": "call"}, {"api_name": "portal.views.index", "line_number": 7, "usage_type": "attribute"}, {"api_name": "portal.views", "line_number": 7, "usage_type": "name"}, {"api_name": "portal.views.ebridge", "line_number": 8, "usage_type": "attribute"}, {"api_name": "portal.views", "line_number": 8, "usage_type": "name"}, {"api_name": "portal.views.orders", "line_number": 9, "usage_type": "attribute"}, {"api_name": "portal.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.login", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.logout", "line_number": 14, "usage_type": "argument"}]} +{"seq_id": "292821911", "text": "#使用cookiejar的目的:管理cookie,保存cookie值,\n#一旦存储cookie之后,下一次发起请求的时候就会携带cookie\n#cookie是保存在内存里面的,最后会进行垃圾回收\n\nfrom urllib import request,parse\nfrom http.cookiejar import CookieJar\n\n#创建cookiejar对象,目的如上\ncookie_jar = CookieJar()\n\n#HTTPCookieProcessor创建handle处理器,管理cookiejar\nhandler = request.HTTPCookieProcessor(cookie_jar)\n\n#自定义opener\nopener = request.build_opener(handler)\n\n#分析发现\n# https://www.douban.com/accounts/login\n# 没有验证码的情况\n# source: index_nav\n# form_email: 18518753265\n# form_password: 
ljh12345678\n\n# the case with a captcha\n# source: index_nav\n# form_email: 18518753265\n# form_password: ljh12345678\n# captcha-solution: blade\n# captcha-id: 5IBtw5wm2riyrIrnV3utwUPt:en\n\nurl = 'https://accounts.douban.com/login'\n\nform_data = {\n 'source': 'index_nav',\n 'form_email': '18518753265',\n 'form_password': 'ljh12345678',\n 'captcha-solution': 'science',\n 'captcha-id': 'PcPUVYrRWIai40vo7CCdefb2:en'\n}\n\nform_data = parse.urlencode(form_data).encode('utf-8')\n\n# set the request headers\nreq_header = {\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n}\n\n# build a Request object\nreq = request.Request(url,headers=req_header,data=form_data)\n\n# send the request\nresponse = opener.open(req)\n\n# after a successful login, visit the personal profile page; fetching it successfully shows the cookie was indeed saved\n# and carried along on the next request\nurl = 'https://www.douban.com/people/175417123/'\n\nreq = request.Request(url,headers=req_header)\n\nresponse = opener.open(req)\n\nif response.status == 200:\n with open('douban.html','w') as file:\n file.write(response.read().decode('utf-8'))\n\n\n\n\n\n", "sub_path": "第一周/第三天/03day/urllib_cookiejar.py", "file_name": "urllib_cookiejar.py", "file_ext": "py", "file_size_in_byte": 1818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "http.cookiejar.CookieJar", "line_number": 9, "usage_type": "call"}, {"api_name": "urllib.request.HTTPCookieProcessor", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 12, "usage_type": "name"}, {"api_name": "urllib.request.build_opener", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 15, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 41, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 41, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 49, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 58, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "253971638", "text": "from django.shortcuts import render\nfrom django.conf import settings\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\n\nimport json\nimport requests\nimport pdb\n# Create your views here.\n\ncountryd = {'ar':'Argentina', 'uy':'Uruguay'}\n\ndef init(request):\n return render(request,\"init.html\")\n\n\n@login_required(login_url='/login')\ndef delete(request):\n if request.method == \"POST\":\n json_data = json.loads(request.body)\n try:\n id_for_delete = json_data['deleteid']\n except KeyError:\n HttpResponseServerError(\"Malformed data!\")\n # make request to delete data\n r = requests.delete(settings.API_BASE_URL + 'places/%s'%id_for_delete)\n if r.status_code == 204:\n return JsonResponse({'result':'OK'})\n else:\n return JsonResponse({'result':r.status_code})\n\n@login_required(login_url='/login')\ndef update(request):\n if request.method == \"POST\":\n json_data = json.loads(request.body)\n # get id from url\n id_ = request.path.split('/')[3]\n location = {}\n location['lat'] = json_data.pop('lat')\n location['lng'] = json_data.pop('lon')\n json_data['Location'] = json.dumps(location)\n json_data['Country'] = countryd[json_data.pop('Country')]\n r = requests.post(settings.API_BASE_URL + 
'places/update?where={\"id\":\"%s\"}'%id_,\n data=json_data)\n if r.status_code == 204:\n return JsonResponse({'result':'OK'})\n\n@login_required(login_url='/login')\ndef addnew(request):\n headers = {'content-type': 'application/json'}\n r = requests.post(settings.API_BASE_URL + 'places',\n data=request.body, headers=headers)\n if r.status_code == 200:\n return JsonResponse({'result':'OK'})\n else:\n return JsonResponse({'result':r.status_code})\n\n@login_required(login_url='/login')\ndef home(request):\n\n def _pagination(record_nmbr, bin_size):\n bins = record_nmbr // bin_size\n bin_resto = record_nmbr // bin_size\n bins_lst = []\n nlist = []\n for x in range(bins):\n nlist.append(x*bin_size)\n\n first = True\n for x in nlist[1:]:\n if first:\n bins_lst.append((0,x))\n first = False\n ant = x\n else:\n bins_lst.append((ant+1,x))\n ant = x\n return bins_lst\n \n \n if request.GET:\n cc = request.GET['country']\n country = countryd.get(cc,'Argentina')\n\n\n if 'from' in request.GET:\n from_ = int(request.GET['from'])\n to_ = int(request.GET['to'])\n bin_size = (to_-from_)+1\n requrl = settings.API_BASE_URL + 'places?filter={\"where\":{\"Country\":\"%s\"},\"skip\":%s,\"limit\":%s}'%(country, from_, bin_size)\n response = requests.get(requrl)\n json_res = response.json()\n prev_from = from_ - bin_size\n prev_to = from_ - 1\n next_from = to_ + 1\n next_to = to_ + bin_size\n record_nmbr = int(request.GET['record_nmbr'])\n else:\n bin_size = int(request.GET['points'])\n requrl = settings.API_BASE_URL + 'places/count/'\n response = requests.get(requrl)\n record_nmbr = response.json()['count'] \n \n requrl = settings.API_BASE_URL + 'places?filter={\"where\":{\"Country\":\"%s\"},\"limit\":%s}'%(country, bin_size)\n response = requests.get(requrl)\n json_res = response.json()\n prev_from = 0\n prev_to = 0\n next_from = bin_size + 1\n next_to = record_nmbr\n\n\n\n bins_lst = _pagination(record_nmbr, bin_size)\n bins = len(bins_lst)\n # get next and previous\n # previous\n\n context = {'data':json_res, 'country':country, 'bin_size':bin_size,\n 'bins':bins, 'bins_lst':bins_lst, 'cc':cc,\n 'prev_from': prev_from, \n 'prev_to': prev_to,\n 'next_from':next_from,\n 'next_to':next_to,\n 'record_nmbr':record_nmbr,\n }\n\n \n\n return render(request, \"country.html\", context)\n\n\n else:\n return render(request,\"index.html\")\n\n@ensure_csrf_cookie\ndef country(request,cc):\n\n # WARNING: NO USED NOW!!!\n # for pagination\n # Get amount of records\n country = countryd.get(cc, 'Argentina')\n requrl = settings.API_BASE_URL + 'places?filter={\"where\":{\"Country\":\"%s\"},\"limit\":50}'%country\n response = requests.get(requrl)\n json_res = response.json() \n context = {'data':json_res, 'country':country}\n\n\n return render(request, \"country.html\", context)", "sub_path": "backoffice/dswipper/swipperbo/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 27, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 31, "usage_type": "call"}, 
{"api_name": "django.contrib.auth.decorators.login_required", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 44, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 53, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 56, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 92, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 93, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 102, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 102, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 103, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 106, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 106, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 107, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 132, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 60, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 145, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 145, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 146, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 151, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.ensure_csrf_cookie", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "256724563", "text": "from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom .models import Project, Test, Run, RunTest\nfrom .update_projects_in_db import update_projects, update_tests\nfrom .put_set_tests_to_db import update_runned_tests\nfrom django.shortcuts import redirect\n\nimport importlib\nimport argparse\nimport unittest\nimport os\nimport sys\nimport time\nfrom .html_test_runner.html_test_runner import TestRunner\n\ndef index(request):\n update_projects()\n projects = Project.objects.order_by('name')\n\n context = {\n 'projects': projects\n }\n return render(request, 'web_gui/index.html', context)\n\ndef runnedTests(request):\n query = 'SELECT DISTINCT * ' \\\n 'FROM \"web_gui_run\" '\n\n tests = Run.objects.raw(query)\n context = {\n 'tests': tests\n }\n return render(request, 'web_gui/runnedTests.html', context)\n\ndef 
runnedTest(request, runnedtest_id):\n query = 'SELECT DISTINCT * ' \\\n 'FROM \"web_gui_run\" ' \\\n 'INNER JOIN \"web_gui_runtest\" on \"web_gui_runtest\".\"run_id_id\" = \"web_gui_run\".\"id\" ' \\\n 'INNER JOIN \"web_gui_test\" on \"web_gui_runtest\".\"test_id_id\" = \"web_gui_test\".\"id\" ' \\\n 'where \"web_gui_runtest\".\"id\" = \\'{}\\''.format(str(runnedtest_id))\n test = RunTest.objects.raw(query)[0]\n context = {\n 'test': test,\n }\n return render(request, 'web_gui/runnedtest.html', context)\n\n\ndef raport(request, raport_id):\n query = 'SELECT DISTINCT * ' \\\n 'FROM \"web_gui_run\" ' \\\n 'INNER JOIN \"web_gui_runtest\" on \"web_gui_runtest\".\"run_id_id\" = \"web_gui_run\".\"id\" ' \\\n 'INNER JOIN \"web_gui_test\" on \"web_gui_runtest\".\"test_id_id\" = \"web_gui_test\".\"id\" '\\\n 'WHERE \"web_gui_run\".\"id\" = \\'{}\\''.format(str(raport_id))\n tests = Run.objects.raw(query)\n success = 0\n fail = 0\n error = 0\n skip = 0\n for i in tests:\n if i.result == 0:\n success += 1\n elif i.result == 1:\n fail += 1\n elif i.result == 2:\n error += 1\n elif i.result == 3:\n skip += 1\n context = {\n 'tests': tests,\n 'success': success,\n 'fail': fail,\n 'error': error,\n 'skip': skip\n }\n return render(request, 'web_gui/raport.html', context)\n\n\n\ndef _create_html_results_folder():\n cwd = os.getcwd()\n results_path = '{}/output/{}'.format(cwd, \"asd\")\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n return results_path\n\n\ndef project(request, project_id):\n update_tests()\n\n\n project_name = get_object_or_404(Project, id=project_id)\n query = 'SELECT DISTINCT \"web_gui_test\".\"id\" ' \\\n 'FROM \"web_gui_test\" ' \\\n 'INNER JOIN \"web_gui_project\" on \"web_gui_test\".\"project_id_id\" = \"web_gui_project\".\"id\" where \"web_gui_project\".\"id\" = \\'{}\\''.format(str(project_id))\n\n tests = Test.objects.raw(query)\n context = {\n 'project': project_name,\n 'tests': tests,\n }\n if request.POST:\n checkList = request.POST.getlist('checks')\n tests = []\n for p in checkList:\n test_name = get_object_or_404(Test, id=p)\n tests.append(test_name)\n suite1 = []\n for p in tests:\n numeTest = p.__str__()\n fullName = p.full_name\n className = fullName.split(numeTest)[0].split('.')[-2]\n path = fullName.split(numeTest)[0].split(className)[0][:-1]\n # className2 = importlib.import_module('projects.ib.' + path)\n className = getattr(importlib.import_module('projects.' + project_name.__str__() + '.' 
+ path), className)\n suite1.append(className(numeTest))\n big_suite = unittest.TestSuite()\n for test_class in suite1:\n big_suite.addTest(test_class)\n\n\n\n html_results_path = _create_html_results_folder()\n filename = '{timestamp}.html'.format(timestamp=time.strftime(\"%Y%m%d-%H%M%S\"))\n test_result = TestRunner(\n title=\"asd\",\n output_path='{}/{}'.format(html_results_path, filename),\n ).run(big_suite)\n\n runnedTestsPage = update_runned_tests(test_result.result)\n return redirect('/raport/' + str(runnedTestsPage))\n\n return render(request, 'web_gui/project.html', context)\n", "sub_path": "runner/web_gui/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "update_projects_in_db.update_projects", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Project.objects.order_by", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Run.objects.raw", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Run.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Run", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "models.RunTest.objects.raw", "line_number": 41, "usage_type": "call"}, {"api_name": "models.RunTest.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.RunTest", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Run.objects.raw", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Run.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Run", "line_number": 54, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 83, "usage_type": "call"}, {"api_name": "update_projects_in_db.update_tests", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 91, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 91, "usage_type": "argument"}, {"api_name": "models.Test.objects.raw", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Test.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Test", "line_number": 96, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Test", "line_number": 105, "usage_type": "argument"}, {"api_name": "importlib.import_module", "line_number": 114, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 116, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 123, "usage_type": "call"}, {"api_name": "html_test_runner.html_test_runner.TestRunner", "line_number": 124, "usage_type": "call"}, {"api_name": "put_set_tests_to_db.update_runned_tests", "line_number": 
129, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "593945171", "text": "import tensorflow as tf\nfrom functools import partial\nimport numpy as np\n\nn_inputs = 28 * 28\nn_hidden1 = 300\nn_hidden2 = 150\nn_hidden3 = n_hidden1\nn_outputs = n_inputs\n\nlearning_rate = 0.01\nl2_reg = 0.0001\n\nX = tf.placeholder(tf.float32, shape=[None, n_inputs])\n\ninitializer = tf.contrib.layers.variance_scaling_initializer()\nregularizer = tf.contrib.layers.l2_regularizer(l2_reg)\nactivation = tf.nn.elu\n\nweights1_init = initializer([n_inputs, n_hidden1])\nweights2_init = initializer([n_hidden1, n_hidden2])\nweights3_init = initializer([n_hidden2, n_hidden3])\nweights4_init = initializer([n_hidden3, n_outputs])\n\nweights1 = tf.Variable(weights1_init, dtype=tf.float32, name=\"weights1\")\nweights2 = tf.Variable(weights2_init, dtype=tf.float32, name=\"weights2\")\nweights3 = tf.Variable(weights3_init, dtype=tf.float32, name=\"weights3\")\nweights4 = tf.Variable(weights4_init, dtype=tf.float32, name=\"weights4\")\n\nbiases1 = tf.Variable(tf.zeros(n_hidden1), name=\"biases1\")\nbiases2 = tf.Variable(tf.zeros(n_hidden2), name=\"biases2\")\nbiases3 = tf.Variable(tf.zeros(n_hidden3), name=\"biases3\")\nbiases4 = tf.Variable(tf.zeros(n_outputs), name=\"biases4\")\n\nhidden1 = activation(tf.matmul(X, weights1) + biases1)\nhidden2 = activation(tf.matmul(hidden1, weights2) + biases2)\nhidden3 = activation(tf.matmul(hidden2, weights3) + biases3)\noutputs = tf.matmul(hidden3, weights4) + biases4\n\nreconstructin_loss = tf.reduce_mean(tf.square(outputs - X))\n# reg_loss = regularizer(weights1) + regularizer(weights2)\n# loss = reconstructin_loss + reg_loss\n\noptimizer = tf.train.AdamOptimizer(learning_rate)\n\nwith tf.name_scope(\"phase1\"):\n phase1_outputs = tf.matmul(hidden1, weights4) + biases4\n phase1_reconstruction_loss = tf.reduce_mean(tf.square(phase1_outputs - X))\n phase1_reg_loss = regularizer(weights1) + regularizer(weights4)\n phase1_loss = phase1_reconstruction_loss + phase1_reg_loss\n phase1_training_op = optimizer.minimize(phase1_loss)\n\nwith tf.name_scope(\"phase2\"):\n phase2_reconstruction_loss = tf.reduce_mean(tf.square(hidden3 - hidden1))\n phase2_reg_loss = regularizer(weights2) + regularizer(weights3)\n phase2_loss = phase2_reconstruction_loss + phase2_reg_loss\n train_vars = [weights2, biases2, weights3, biases3]\n phase2_training_op = optimizer.minimize(phase2_loss, var_list=train_vars)\n\ninit = tf.global_variables_initializer()\n\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\nwith open(\"mnist.pkl\", \"rb\") as f:\n mnist = pickle.load(f)\n\nX_train, X_test, y_train, y_test = train_test_split(mnist[\"data\"], mnist[\"target\"], random_state=42, test_size=0.33)\n\n#scaler = StandardScaler()\n#X_train = scaler.fit_transform(X_train)\n#X_test = scaler.transform(X_test)\n#std = np.std(X_train)\n#print(std)\n#X_train /= std\n#X_test /= std\n\ndef shuffle_batch(X, y, batch_size):\n rnd_idx = np.random.permutation(len(X))\n n_batches = len(X) // batch_size\n for batch_idx in np.array_split(rnd_idx, n_batches):\n X_batch, y_batch = X[batch_idx], y[batch_idx]\n yield X_batch, y_batch\n\ntraining_ops = [phase1_training_op, phase2_training_op]\nreconstructin_losses = [phase1_reconstruction_loss, phase2_reconstruction_loss]\nn_epochs = [4, 4]\nbatch_size = [150, 
150]\nimport sys\n\nwith tf.Session() as sess:\n init.run()\n for phase in range(2):\n print(\"Training phase #{}\".format(phase + 1))\n for epoch in range(n_epochs[phase]):\n n_batches = X_train.shape[0] // batch_size[phase]\n for iteration, (X_batch, y_batch) in enumerate(shuffle_batch(X_train, y_train, batch_size[phase])):\n print(\"\\r{}%\".format(100 * iteration // n_batches), end=\"\")\n sys.stdout.flush()\n sess.run(training_ops[phase], feed_dict={X: X_batch})\n loss_train = reconstructin_losses[phase].eval(feed_dict={X: X_batch})\n print(\"\\r{}\".format(epoch), \"Train MSE:\", loss_train)\n loss_test = reconstructin_loss.eval(feed_dict={X: X_test})\n print(\"Test MSE:\" , loss_test)\n outputs_val = outputs.eval(feed_dict={X: X_test})\n\nn_test_digits = 2\n\nfrom matplotlib import pyplot as plt\n\ndef plot_image(image, shape=[28, 28]):\n plt.imshow(image.reshape(shape), cmap=\"Greys\", interpolation=\"nearest\")\n plt.axis(\"off\")\n\nfor digit_index in range(n_test_digits):\n plt.subplot(n_test_digits, 2, digit_index * 2 + 1)\n plot_image(X_test[digit_index])\n plt.subplot(n_test_digits, 2, digit_index * 2 + 2)\n plot_image(outputs_val[digit_index])\n\nplt.show()", "sub_path": "tensorflow/15_3_4.py", "file_name": "15_3_4.py", "file_ext": "py", "file_size_in_byte": 4529, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tensorflow.placeholder", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.variance_scaling_initializer", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 38, 
"usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 60, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.array_split", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 100, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 100, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "640130128", "text": "# coding:utf-8\n\nfrom flask import Blueprint, request, render_template\nfrom flask_user import roles_required\nfrom ...services import group_service\nfrom ...helpers.flask_helper import json_response\nfrom ...models import Group\n\nbp = Blueprint('manager_groups', __name__, url_prefix='/manager/groups')\n\n\n@bp.route('/', methods=['GET'])\n@roles_required('manager')\ndef home_group_page():\n return render_template('backend/proGroupsMgr.html')\n\n\n@bp.route('/list', methods=['GET'])\n@roles_required('manager')\ndef list_group():\n limit = int(request.args.get(\"iDisplayLength\", \"10\"))\n offset = int(request.args.get(\"iDisplayStart\", \"0\"))\n sEcho = request.args.get(\"sEcho\")\n name = request.args.get('name') if request.args.get('name') else None\n filters = []\n if name:\n filters.append(Group.name.startswith(name))\n\n count, groups = group_service.paginate_by(filters=filters, orders=[Group.name.asc()], offset=offset, limit=limit)\n return json_response(sEcho=sEcho, iTotalRecords=count, iTotalDisplayRecords=count, aaData=groups)\n\n\n@bp.route('/create', 
methods=['POST'])\n@roles_required('manager')\ndef create_group():\n kwargs = request.json\n group = group_service.create_group(kwargs['name'])\n return json_response(group=group)\n\n\n@bp.route('//delete', methods=['POST'])\ndef delete_group(group_id):\n group_service.delete_group(group_id)\n return json_response(success=True)", "sub_path": "ca/backend/manager/groups.py", "file_name": "groups.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_user.roles_required", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "models.Group.name.startswith", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Group.name", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 27, "usage_type": "name"}, {"api_name": "services.group_service.paginate_by", "line_number": 29, "usage_type": "call"}, {"api_name": "services.group_service", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Group.name.asc", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Group.name", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 29, "usage_type": "name"}, {"api_name": "helpers.flask_helper.json_response", "line_number": 30, "usage_type": "call"}, {"api_name": "flask_user.roles_required", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "services.group_service.create_group", "line_number": 37, "usage_type": "call"}, {"api_name": "services.group_service", "line_number": 37, "usage_type": "name"}, {"api_name": "helpers.flask_helper.json_response", "line_number": 38, "usage_type": "call"}, {"api_name": "flask_user.roles_required", "line_number": 34, "usage_type": "call"}, {"api_name": "services.group_service.delete_group", "line_number": 43, "usage_type": "call"}, {"api_name": "services.group_service", "line_number": 43, "usage_type": "name"}, {"api_name": "helpers.flask_helper.json_response", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "131773301", "text": "##############################################################################################\nimport numpy as np\nimport cv2 as 
cv\n##############################################################################################\nwindow = 'frame'\ncv.namedWindow(window)\nwebcam = cv.VideoCapture(1)\n#################################################################\ndef nothing(x):\n pass \n#################################################################\ncv.createTrackbar('hmin', window,0,255,nothing)\ncv.createTrackbar('hmax', window,0,255,nothing)\ncv.createTrackbar('smin', window,0,255,nothing)\ncv.createTrackbar('smax', window,0,255,nothing)\ncv.createTrackbar('vmin', window,0,255,nothing)\ncv.createTrackbar('vmax', window,0,255,nothing)\ncv.setTrackbarPos('hmax', window,255)\ncv.setTrackbarPos('smax', window,255)\ncv.setTrackbarPos('vmax', window,255)\n##############################################################################################\nwhile(True):\n _,frame = webcam.read()\n #blurring the frame\n frame = cv.GaussianBlur(frame, (5, 5), 0)\n \n #trackbar stuff \n hmin = cv.getTrackbarPos('hmin',window)\n smin = cv.getTrackbarPos('smin',window)\n vmin = cv.getTrackbarPos('vmin',window)\n hmax = cv.getTrackbarPos('hmax',window)\n smax = cv.getTrackbarPos('smax',window)\n vmax = cv.getTrackbarPos('vmax',window)\n \n #color min max\n minbl = np.array([hmin, smin, vmin])\n maxbl = np.array([hmax, smax, vmax])\n #minbl= np.array([30,140,77])\n #maxbl= np.array([255,255,255])\n #hsv converter\n hsv = cv.cvtColor(frame,cv.COLOR_BGR2HSV)\n \n #frame to binary frame\n binaried = cv.inRange(hsv,minbl,maxbl)\n \n #removing the noise spots\n medianed = cv.medianBlur(binaried, 25) \n\n #finding the edge lines\n edges = cv.Canny(medianed, 75, 120)\n\n #detecting line segments\n lines = cv.HoughLinesP(edges, 1, np.pi/180, 50, maxLineGap=50)\n\n #getting the coordinates of the lines\n try:\n if lines is not None and len(lines) > 0:\n for line in lines:\n x1, y1, x2, y2 = line[0]\n cv.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 5)\n print(\" x1 is: \",x1,\" y1 is: \",y1,\" x2 is: \",x2,\" y2 is: \",y2)\n \n except:\n print(\"nothing found\")\n\n #showing some stuff\n cv.imshow(\"frame\", frame)\n cv.imshow(\"edges\", edges)\n cv.imshow(\"hsv\", hsv)\n############################################### \n ikey = cv.waitKey(1)\n if(ord(\"q\") == ikey):\n cv.destroyAllWindows()\n break\n \n", "sub_path": "all/getting lines.py", "file_name": "getting lines.py", "file_ext": "py", "file_size_in_byte": 2494, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.namedWindow", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.setTrackbarPos", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.setTrackbarPos", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.setTrackbarPos", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 29, 
"usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "217139261", "text": "#!/usr/bin/python3\nimport sys\nimport csv\nfrom multiprocessing import Process,Queue\nfrom getopt import getopt\nimport sys\nfrom configparser import ConfigParser\nfrom datetime import datetime\nuser_p = Queue()\ncal_p = Queue()\nnew_data = []\nclass Args:\n\tdef __init__(self):\n\t\tself.args = sys.argv[1:]\n\t\tself.pas_path()\n\tdef pas_path(self):\n\t\topts,args = getopt(self.args,'-hC:c:d:o:',['help'])\n\t\tfor opt_name,opt_value in opts:\n\t\t\tif opt_name in ['-h','--help']:\n\t\t\t\tprint('help')\n\t\t\t\tsys.exit(0)\n\t\t\tif opt_name in '-C':\n\t\t\t\tself.city = opt_value.upper()\n\t\t\tif opt_name in '-c':\n\t\t\t\tself.config = opt_value\n\t\t\tif opt_name in '-d':\n\t\t\t\tself.d_src = opt_value\n\t\t\tif opt_name in '-o':\n\t\t\t\tself.o_src = opt_value\nclass Config:\n\tdef __init__(self,args):\n\t\tself.args = args\n\t\tself.config = self._read_config()\n\tdef _read_config(self):\n\t\tconfig = {'rate':0}\n\t\tcon = ConfigParser()\n\t\tcon.read(self.args.config)\n\t\tfor cf in con.options(self.args.city):\n\t\t\tif cf == 'jishul' or cf == 'jishuh':\n\t\t\t\tconfig[cf] = con.getfloat(self.args.city,cf)\n\t\t\telse:\n\t\t\t\tconfig['rate'] += con.getfloat(self.args.city,cf)\n\t\tprint(config['rate'],type(config['rate']))\n\t\treturn config\ndef user_id(args,queuep):\n\twith open(args.d_src) as f:\n\t\tdata = list(csv.reader(f))\n\tqueuep.put(data)\n\ndef cal_data_pro(args,queuep,cal_p):\n\tdata = queuep.get()\n\tprint(args.config['rate'],type(args.config['rate']))\n\tfor id,salary in data:\n\t\tsalary = int(salary)\n\t\tshebao = salary * args.config['rate']\n\t\tif salary > args.config['jishuh']:\n\t\t\tshebao = args.config['jishuh'] * args.config['rate']\n\t\tif salary < args.config['jishul']:\n\t\t\tshebao = args.config['jishul'] * args.config['rate']\n\t\tm = salary - shebao - 3500\n\t\tif m <= 0:\n\t\t\tamount = 0\n\t\telif m <= 1500:\n\t\t\tamount = m * 0.03 - 0\n\t\telif 1500 < m <= 4500:\n\t\t\tamount = m * 0.1 - 105\n\t\telif 4500 < m <= 9000:\n\t\t\tamount = m * 0.2 - 555\n\t\telif 9000 < m <= 35000:\n\t\t\tamount = m * 0.25 - 1005\n\t\telif 35000 < m <= 
55000:\n\t\t\tamount = m * 0.3 - 2755\n\t\telif 55000 < m <= 80000:\n\t\t\tamount = m * 0.35 - 5505\n\t\telse:\n\t\t\tamount = m * 0.45 - 13505\n\t\tafter_tax = salary - shebao - amount\n\t\tresult = [id,salary,format(shebao,'.2f'),format(amount,'.2f'),format(after_tax,'.2f'),datetime.now().strftime('%Y-%m-%d %H:%M:%S')]\n\t\tnew_data.append(result)\n\tcal_p.put(new_data)\n\ndef write_config(args,cal_p):\n\tdata = cal_p.get()\n\tprint(args.o_src)\n\twith open(args.o_src,'w') as f:\n\t\tfor i in data:\n\t\t\tcsv.writer(f).writerow(i)\n\ndef main():\n\tProcess(target=user_id,args=(args,user_p)).start()\n\tProcess(target=cal_data_pro,args=(config,user_p,cal_p)).start()\n\tProcess(target=write_config,args=(args,cal_p)).start()\nif __name__ == \"__main__\":\n\targs = Args()\n\tconfig = Config(args)\n\tmain()\n", "sub_path": "calculator.py", "file_name": "calculator.py", "file_ext": "py", "file_size_in_byte": 2668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "multiprocessing.Queue", "line_number": 9, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "getopt.getopt", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 21, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 36, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 87, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 90, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 91, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "50118989", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom data_loading.process_air_quality_official import data_for_heatmap as data_for_heatmap_official\n\nif __name__ == \"__main__\":\n\n '''\n OFFICIAL DATA\n '''\n\n data_official = pd.read_csv('../../datathlon data/air-quality-official/Processed_heatmap_all.csv')\n # print(data.head())\n # print(data.shape)\n # print(data.columns)\n couple_columns_official = data_official[['Concentration', 'Longitude', 'Latitude']]\n # print(couple_columns.head())\n # print(data.ix[:, ['Concentration', 'Longitude', 'Latitude']].head())\n data_lat_long_official = couple_columns_official.groupby(['Latitude', 'Longitude']).mean()\n # print(data_lat_long.shape)\n # print(data_lat_long.head(10))\n\n data_lat_long_official = data_lat_long_official.reset_index()\n # print(data_lat_long.head())\n # major_ticks = np.arange(0.2, 0.5, 0.01)\n # minor_ticks = np.arange(0, 50, 1)\n\n '''\n CITIZEN DATA\n '''\n\n data_citizen = pd.read_csv('../../datathlon data/air-quality-citizen/Processed_heatmap_all_citizen.csv')\n # print(data.head())\n # print(data.shape)\n # print(data.columns)\n couple_columns_citizen = data_citizen[['Concentration', 'Longitude', 'Latitude']]\n # print(couple_columns.head())\n # print(data.ix[:, ['Concentration', 'Longitude', 'Latitude']].head())\n data_lat_long_citizen = couple_columns_citizen.groupby(['Latitude', 'Longitude']).mean()\n # print(data_lat_long.shape)\n # 
print(data_lat_long.head(10))\n\n data_lat_long_citizen = data_lat_long_citizen.reset_index()\n # print(data_lat_long.head())\n # major_ticks = np.arange(0.2, 0.5, 0.01)\n # minor_ticks = np.arange(0, 50, 1)\n\n '''\n PLOT\n '''\n\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(1, 1, 1)\n s_1 = ax.scatter('Latitude', 'Longitude', c = 'Concentration', data = data_lat_long_official, cmap = 'RdYlGn_r', marker = 's', s = 190)\n ax.axis([\n data_lat_long_citizen['Latitude'].min() - 0.05,\n data_lat_long_citizen['Latitude'].max() + 0.05,\n data_lat_long_citizen['Longitude'].min() - 0.05,\n data_lat_long_citizen['Longitude'].max() + 0.05\n ])\n ax.grid(which='both', alpha = 0.3)\n ax.grid(which='major', alpha = 0.3)\n ax.set_xlabel('Latitude', fontsize = 10);\n ax.set_ylabel('Longitude', fontsize = 10);\n ax.set_title('Concentration', size = 15)\n\n clip_config = {\n 'Concentration': data_lat_long_official['Concentration'].max(),\n 'Latitude': data_lat_long_citizen['Latitude'].max(),\n 'Longitude': data_lat_long_citizen['Longitude'].max()\n }\n data_lat_long_citizen = data_lat_long_citizen.clip(upper = pd.Series(clip_config), axis=1)\n\n cbar = plt.colorbar(mappable = s_1, ax = ax)\n\n s_2 = ax.scatter('Latitude', 'Longitude', c='Concentration', data=data_lat_long_citizen, cmap='RdYlGn_r', marker='.', s=50)\n # cbar = plt.colorbar(mappable=s_2, ax=ax)\n\n plt.show()", "sub_path": "heatmap/heatmap_citizen_official.py", "file_name": "heatmap_citizen_official.py", "file_ext": "py", "file_size_in_byte": 2995, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "527443425", "text": "import json\nimport time\nimport random\nfrom uuid import uuid4\nfrom PIL import Image\nimport moviepy.editor as mp\n\nfrom . import config\nfrom .extractors import extract_media_v1\nfrom .exceptions import ClientError, PrivateError\n\n\nclass IGTVNotUpload(PrivateError):\n pass\n\n\nclass IGTVConfigureError(IGTVNotUpload):\n pass\n\n\nclass DownloadIGTV:\n def igtv_download(self, media_pk: int, folder: str = \"/tmp\") -> str:\n return self.video_download(media_pk, folder)\n\n def igtv_download_by_url(self, url: str, filename: str = \"\", folder: str = \"/tmp\") -> str:\n return self.video_download_by_url(url, filename, folder)\n\n\nclass UploadIGTV:\n def igtv_upload(\n self,\n filepath: str,\n title: str,\n caption: str,\n thumbnail: str = None,\n usertags: list = [],\n configure_timeout: str = 10,\n ) -> dict:\n \"\"\"Upload IGTV to Instagram\n\n :param filepath: Path to IGTV file (String)\n :param title: Media title (String)\n :param caption: Media description (String)\n :param thumbnail: Path to thumbnail for IGTV (String). 
When None, then\n thumbnail is generated automatically\n :param configure_timeout: Timeout between attempts to configure media (set caption and title)\n\n :return: Object with state of uploading to Instagram (or False)\n \"\"\"\n assert isinstance(filepath, str), \"Filepath must be a string, got %s\" % filepath\n upload_id = str(int(time.time() * 1000))\n thumbnail, width, height, duration = analyze_video(filepath, thumbnail)\n waterfall_id = str(uuid4())\n # upload_name example: '1576102477530_0_7823256191'\n upload_name = \"{upload_id}_0_{rand}\".format(\n upload_id=upload_id, rand=random.randint(1000000000, 9999999999)\n )\n # by segments bb2c1d0c127384453a2122e79e4c9a85-0-6498763\n # upload_name = \"{hash}-0-{rand}\".format(\n # hash=\"bb2c1d0c127384453a2122e79e4c9a85\", rand=random.randint(1111111, 9999999)\n # )\n rupload_params = {\n \"is_igtv_video\": \"1\",\n \"retry_context\": '{\"num_step_auto_retry\":0,\"num_reupload\":0,\"num_step_manual_retry\":0}',\n \"media_type\": \"2\",\n \"xsharing_user_ids\": json.dumps([self.user_id]),\n \"upload_id\": upload_id,\n \"upload_media_duration_ms\": str(int(duration * 1000)),\n \"upload_media_width\": str(width),\n \"upload_media_height\": str(height),\n }\n headers = {\n \"Accept-Encoding\": \"gzip\",\n \"X-Instagram-Rupload-Params\": json.dumps(rupload_params),\n \"X_FB_VIDEO_WATERFALL_ID\": waterfall_id,\n \"X-Entity-Type\": \"video/mp4\",\n }\n response = self.private.get(\n \"https://{domain}/rupload_igvideo/{name}\".format(\n domain=config.API_DOMAIN, name=upload_name\n ), headers=headers\n )\n self.request_log(response)\n if response.status_code != 200:\n raise IGTVNotUpload(response=self.last_response, **self.last_json)\n igtv_data = open(filepath, \"rb\").read()\n igtv_len = str(len(igtv_data))\n headers = {\n \"Offset\": \"0\",\n \"X-Entity-Name\": upload_name,\n \"X-Entity-Length\": igtv_len,\n \"Content-Type\": \"application/octet-stream\",\n \"Content-Length\": igtv_len,\n **headers\n }\n response = self.private.post(\n \"https://{domain}/rupload_igvideo/{name}\".format(\n domain=config.API_DOMAIN, name=upload_name\n ),\n data=igtv_data, headers=headers\n )\n self.request_log(response)\n if response.status_code != 200:\n raise IGTVNotUpload(response=self.last_response, **self.last_json)\n # CONFIGURE\n self.igtv_composer_session_id = self.generate_uuid()\n for attempt in range(20):\n self.logger.debug(\"Attempt #%d to configure IGTV: %s\", attempt, filepath)\n time.sleep(configure_timeout)\n try:\n configured = self.igtv_configure(\n upload_id, thumbnail, width, height, duration, title, caption, usertags\n )\n except ClientError as e:\n if \"Transcode not finished yet\" in str(e):\n \"\"\"\n Response 202 status:\n {\"message\": \"Transcode not finished yet.\", \"status\": \"fail\"}\n \"\"\"\n time.sleep(10)\n continue\n raise e\n else:\n if configured:\n media = self.last_json.get(\"media\")\n self.expose()\n return extract_media_v1(media)\n raise IGTVConfigureError(response=self.last_response, **self.last_json)\n\n def igtv_configure(\n self,\n upload_id: str,\n thumbnail: str,\n width: int,\n height: int,\n duration: int,\n title: str,\n caption: str,\n usertags: list\n ) -> bool:\n \"\"\"Post Configure IGTV (send caption, thumbnail and more to Instagram)\n\n :param upload_id: Unique upload_id (String)\n :param thumbnail: Path to thumbnail for igtv (String)\n :param width: Width in px (Integer)\n :param height: Height in px (Integer)\n :param duration: Duration in seconds (Integer)\n :param caption: Media description (String)\n 
\"\"\"\n self.photo_rupload(thumbnail, upload_id)\n usertags = [\n {\"user_id\": tag['user']['pk'], \"position\": tag['position']}\n for tag in usertags\n ]\n data = {\n \"igtv_ads_toggled_on\": \"0\",\n \"filter_type\": \"0\",\n \"timezone_offset\": \"10800\",\n \"media_folder\": \"ScreenRecorder\",\n \"source_type\": \"4\",\n \"title\": title,\n \"caption\": caption,\n \"usertags\": json.dumps({\"in\": usertags}),\n \"date_time_original\": time.strftime(\"%Y%m%dT%H%M%S.000Z\", time.localtime()),\n \"igtv_share_preview_to_feed\": \"1\",\n \"upload_id\": upload_id,\n \"igtv_composer_session_id\": self.igtv_composer_session_id,\n \"device\": self.device,\n \"length\": duration,\n \"clips\": [{\"length\": duration, \"source_type\": \"4\"}],\n \"extra\": {\"source_width\": width, \"source_height\": height},\n \"audio_muted\": False,\n \"poster_frame_index\": 70,\n }\n return self.private_request(\n \"media/configure_to_igtv/?video=1\",\n self.with_default_data(data),\n with_signature=True,\n )\n\n\ndef analyze_video(filepath: str, thumbnail: str = None) -> tuple:\n \"\"\"Analyze and crop thumbnail if need\n \"\"\"\n print(f'Analizing IGTV file \"{filepath}\"')\n video = mp.VideoFileClip(filepath)\n width, height = video.size\n if not thumbnail:\n thumbnail = f\"{filepath}.jpg\"\n print(f'Generating thumbnail \"{thumbnail}\"...')\n video.save_frame(thumbnail, t=(video.duration / 2))\n crop_thumbnail(thumbnail)\n return thumbnail, width, height, video.duration\n\n\ndef crop_thumbnail(filepath):\n \"\"\"Crop IGTV thumbnail with save height\n \"\"\"\n im = Image.open(filepath)\n width, height = im.size\n offset = (height / 1.78) / 2\n center = width / 2\n # Crop the center of the image\n im = im.crop((center - offset, 0, center + offset, height))\n im.save(open(filepath, \"w\"))\n im.close()\n return True\n", "sub_path": "instagrapi/igtv.py", "file_name": "igtv.py", "file_ext": "py", "file_size_in_byte": 7623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "exceptions.PrivateError", "line_number": 13, "usage_type": "name"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 53, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 66, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 109, "usage_type": "call"}, {"api_name": "exceptions.ClientError", "line_number": 114, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 120, "usage_type": "call"}, {"api_name": "extractors.extract_media_v1", "line_number": 127, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 163, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 164, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 164, "usage_type": "call"}, {"api_name": "moviepy.editor.VideoFileClip", "line_number": 186, "usage_type": "call"}, {"api_name": "moviepy.editor", "line_number": 186, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 199, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 199, "usage_type": "name"}]} +{"seq_id": "585365586", "text": "import os\nimport sys\nimport xarray as xr\nimport numpy as np\nfrom pathlib import Path\n\n# Import library specific modules\nsys.path.append(\"../../../\")\nfrom pyspod.spod_low_storage import 
SPOD_low_storage\nfrom pyspod.spod_low_ram import SPOD_low_ram\nfrom pyspod.spod_streaming import SPOD_streaming\nimport pyspod.weights as weights\n\n# Current path\nCWD = os.getcwd()\n\n# Inspect and load data\nfile = os.path.join(CWD,'E20C_MONTHLYMEAN00_1900_2010_U131128_3D.nc')\nds = xr.open_dataset(file)\nprint(ds)\n\n# we extract time, longitude and latitude\nt = np.array(ds['time'])\nx1 = np.array(ds['longitude'])\nx2 = np.array(ds['latitude'])\nx3 = np.array(ds['level'])\nprint('shape of t (time): ', t.shape)\nprint('shape of x1 (longitude): ', x1.shape)\nprint('shape of x2 (latitude) : ', x2.shape)\nprint('shape of x3 (level) : ', x3.shape)\n\n# we set the variables we want to use for the analysis\n# (we select all the variables present) and load the in RAM\nvariables = ['u']\nX = np.empty([t.shape[0], x1.shape[0], x2.shape[0], x3.shape[0], len(variables)])\nfor i,var in enumerate(variables):\n X[...,i] = np.einsum('tijk->tkji', np.array(ds[var]))\n X[...,i] = np.nan_to_num(X[...,i])\nprint('shape of data matrix X: ', X.shape)\n\n# define required and optional parameters\nparams = dict()\n\n# required parameters\nparams['dt' ] = 744 # data time-sampling\nparams['nt' ] = t.shape[0] # number of time snapshots\nparams['xdim' ] = X[0,...,0].ndim # number of spatial dimensions (longitude and latitude)\nparams['nv' ] = len(variables) # number of variables\nparams['n_FFT' ] = np.ceil(12 * 12) # length of FFT blocks\nparams['n_freq' ] = params['n_FFT'] / 2 + 1 # number of frequencies\nparams['n_overlap' ] = np.ceil(params['n_FFT'] * 0 / 100) # dimension block overlap region\nparams['mean' ] = 'longtime' # type of mean to subtract to the data\nparams['normalize' ] = False # normalization of weights by data variance\nparams['savedir' ] = os.path.join(CWD, 'results', Path(file).stem) # folder where to save results\nparams['weights'] = weights.geo_weights_trapz_3D(\\\n lat=x2,\n lon=x1,\n R=1,\n z=x3,\n n_vars=params['nv']) # weights\n\n# optional parameters\nparams['savefreqs' ] = np.arange(0,params['n_freq']) # frequencies to be saved\nparams['n_modes_save'] = 5 # modes to be saved\nparams['normvar' ] = False # normalize data by data variance\nparams['conf_level' ] = 0.95 # calculate confidence level\nparams['savefft' ] = False # save FFT blocks to reuse them in the future (saves time)\n\n\n# Perform SPOD analysis using low storage module\nSPOD_analysis = SPOD_low_ram(data=X, params=params, data_handler=False, variables=variables)\nspod = SPOD_analysis.fit()\n\n# Show results\nT_approx = 744 # approximate period (in days)\nfreq_found, freq_idx = spod.find_nearest_freq(freq_required=1/T_approx, freq=spod.freq)\nmodes_at_freq = spod.get_modes_at_freq(freq_idx=freq_idx)\n\nfreq = spod.freq*24\nspod.plot_eigs()\nspod.plot_eigs_vs_frequency(freq=freq)\nspod.plot_eigs_vs_period (freq=freq, xticks=[1, 7, 30, 365, 740, 1825])\nspod.plot_3D_modes_slice_at_frequency(\n freq_required=freq_found,\n freq=freq,\n x1=x1-180,\n x2=x2,\n x3=x3,\n slice_dim=2,\n slice_id=2,\n coastlines='centred', modes_idx=[0,1,2], vars_idx=[0])\nspod.plot_mode_tracers(\n freq_required=freq_found,\n freq=freq,\n coords_list=[(100,0,2)],\n modes_idx=[0,1,2])\nspod.plot_data_tracers(coords_list=[(100,0,2),(200,10,10)])\n", "sub_path": "tutorials/climate/ERA20C_QBO_3D/ERA20C_QBO_3D.py", "file_name": "ERA20C_QBO_3D.py", "file_ext": "py", "file_size_in_byte": 3517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.path.append", "line_number": 8, 
"usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "xarray.open_dataset", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspod.weights.geo_weights_trapz_3D", "line_number": 55, "usage_type": "call"}, {"api_name": "pyspod.weights", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 63, "usage_type": "call"}, {"api_name": "pyspod.spod_low_ram.SPOD_low_ram", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "601905008", "text": "# Mark Bauman 2015\n##Calculates the rmse for each file in parallel based on specified num trees in filename\n#First run https://github.com/markleeb/SEAC4RS/Optimal_num_estimators/RandomForestOptimalNumTrees_1.py to write files\n\nimport pandas as pd\nimport numpy as np\nimport glob\nimport os, sys\n\n#import df\ndf = pd.read_csv('...path....csv', index_col=0)\n\n## Standardize the data for each feature to have a mean of 0, and std of 1\ndf = (df - df.mean()) / df.std()\ndf = df.dropna(axis = 1, how = 'all')\n\nfilePath = '...path.../num_trees*' #initialize filepath\n\ndef rfRegrOptTreesParCalc(filePath, data, target):\n from random import shuffle\n from sklearn.ensemble import RandomForestRegressor \n\n fileList = glob.glob(filePath) #List of files in directory\n fileList.sort() #Sort list so that .job.claimed addition will show up right after the .job file\n\n #create random index = number of files in fileList\n index = range(len(fileList)) \n shuffle(index)\n\n\n #Loop through each file randomly\n for n in index:\n fn = fileList[n]\n if os.path.isfile(fn):\n if fn[-7:] == 'claimed':\n continue\n \n os.rename(fn, fn.replace('.job', '.job.claimed')) #renames file, not fn variable name\n print('working on ' + fn)\n fn = fn.replace('.job', '.job.claimed') #renames fn variable name\n\n #split filename and retrieve number of trees to use \n fn = fn.split(':')\n numTrees = int(fn[1])\n\n #Inner CV loop with 10 iterations to calc avg rmse -- can make this number whatever you want\n rmseSum = 0\n for i in range(10):\n #split data into test and train sets, and calculate rmse with given number of trees\n msk = np.random.rand(len(data)) < 0.6\n\n train = data[msk]\n test = data[~msk]\n\n test_y = test[target]\n test_X = test.drop(target, 1)\n \n train_y = train[target]\n train_X = train.drop(target, 1)\n\n #Initialize and train the model\n regr = 
RandomForestRegressor(n_estimators = numTrees+10) \n regr.fit(train_X, train_y)\n\n #Calculate rmse\n rmseSum = rmseSum + np.sqrt(((regr.predict(test_X) - test_y) ** 2).mean())\n\n print('inner CV loop iteration: ' + str(1 + i))\n \n rmse = rmseSum/(i+1)\n fn = ':'.join(fn)\n os.rename(fn, fn.replace('rmse', str(rmse))) #add rmse calculation to filename \n \n\n#run it\nrfRegrOptTreesParCalc(filePath, df, '...target...')\n", "sub_path": "Optimal_num_estimators/RandomForestOptimalNumTrees_2.py", "file_name": "RandomForestOptimalNumTrees_2.py", "file_ext": "py", "file_size_in_byte": 2767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 23, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "471597916", "text": "\"\"\"OpenAQ Air Quality Dashboard with Flask.\"\"\"\nfrom flask import Flask, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom decouple import config\nfrom os import getenv\nimport openaq\n\n\nAPP = Flask(__name__)\nAPP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'\nDB = SQLAlchemy(APP)\n\n\nAPI = openaq.OpenAQ()\nmment = API.measurements(city='Los Angeles', parameter='pm25')\nbody = mment[1]\ndef LAquery(k):\n LAresults = body['results']\n values = []\n for k in LAresults:\n kvalue = k.get('value')\n kdate = k.get('date')\n kutc = kdate.get('utc')\n values.append((kvalue, kutc))\n return values\n\nclass Record(DB.Model):\n id = DB.Column(DB.Integer, primary_key=True)\n datetime = DB.Column(DB.String(25))\n value = DB.Column(DB.Float, nullable=False)\n\n def __repr__(self):\n return f\"\"\n\n@APP.route('/')\ndef root():\n \"\"\"Base view.\"\"\"\n records = Record.query.filter(Record.value>=10).all()\n res=''\n for rec in records:\n res += 'datetime = '+ rec.datetime\n res += \", \"\n res += 'value = '+ str(rec.value)\n res += '
<br>'\n    return res\n\n\n@APP.route('/refresh')\ndef refresh():\n    \"\"\"Pull fresh data from Open AQ and replace existing data.\"\"\"\n    DB.drop_all()\n    DB.create_all()\n    API_items = body['results']\n    for i in API_items:\n        ivalue = i.get('value')\n        idate = i.get('date')\n        iutc = idate.get('utc')\n        db_item = (Record(datetime=iutc, value=ivalue))\n        DB.session.add(db_item)\n    DB.session.commit()\n    return 'Data refreshed!'\n\nif __name__ == \"__main__\":\n    APP.run()\n", "sub_path": "sprint-challenge/aq_dashboard.py", "file_name": "aq_dashboard.py", "file_ext": "py", "file_size_in_byte": 1662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 11, "usage_type": "call"}, {"api_name": "openaq.OpenAQ", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "496506924", "text": "from nsl.notifications import emailer_nsl_internal\nfrom nsl.app_settings import LMAP_ALLOWED_NSLS, LMAP_REJECTED_NSLS, \\\n    NSL_DELAY_TIMEOUT_CACHE_KEY, NSL_DELAY_CHECK_CACHE_KEY\nfrom package.app_settings import LMAP_STREAMS, DEFAULT_NSL_KEYS\nfrom dvutils.utils import push_lmap_data_to_kinesis, push_fe_data_to_kinesis\nfrom lmap.app_settings import LMAP_STRUCTURE_NAME\nfrom django.core.cache import cache\n\ndef notifications_signal_handler(sender, instance, created, **kwargs):\n    \"\"\"\n    Signal the notification handler to send out notifications as applicable\n    \"\"\"\n    # Currently only taking care of internal emails\n    # Will modify according to other notification needs.\n    emailer_nsl_internal(instance)\n\n\ndef send_nsl_to_fe(instance):\n    push_url = LMAP_STREAMS.get('NSL_DATA')\n    if instance.code_type == 'EOD':\n        push_fe_data_to_kinesis(push_url, '{}'.format(instance.full_code),\n                                instance.serializer(fields=DEFAULT_NSL_KEYS))\n\n\ndef send_nsl_to_lm(instance):\n    push_url = LMAP_STREAMS.get('NSL_DATA')\n    if (instance.code_type == 'EOD' and instance.subcode not in LMAP_REJECTED_NSLS) \\\n            or (instance.subcode in LMAP_ALLOWED_NSLS and instance.code == 'X') \\\n            or (instance.code == 'L' and instance.subcode == 'PMA'):\n        # For LMAP\n        push_lmap_data_to_kinesis(push_url, '{}'.format(instance.full_code),\n                                  instance.serializer(fields=DEFAULT_NSL_KEYS),\n                                  event='nslcode_update', client=LMAP_STRUCTURE_NAME)\n\n\ndef clear_delay_timeout_cache(instance):\n    \"\"\"\n    Post save receiver for clearing the \n    on delay expiration time cache and\n    delay expiration check cache\n    \"\"\"\n    # delete both the caches on save\n    # so that it's populated anew when\n    # requested for the first time after\n    # saving an NSL\n    cache.delete(NSL_DELAY_CHECK_CACHE_KEY)\n    cache.delete(NSL_DELAY_TIMEOUT_CACHE_KEY)\n", "sub_path": "nsl/receivers.py", "file_name": "receivers.py", "file_ext": "py", "file_size_in_byte": 1972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "nsl.notifications.emailer_nsl_internal", "line_number": 15, "usage_type": "call"}, {"api_name": "package.app_settings.LMAP_STREAMS.get", "line_number": 19, "usage_type": "call"}, {"api_name": "package.app_settings.LMAP_STREAMS", "line_number": 19, "usage_type": "name"}, {"api_name": "dvutils.utils.push_fe_data_to_kinesis", "line_number": 21, "usage_type": "call"}, {"api_name": "package.app_settings.DEFAULT_NSL_KEYS", "line_number": 22, "usage_type": "name"}, {"api_name": "package.app_settings.LMAP_STREAMS.get", "line_number": 26, "usage_type": 
"call"}, {"api_name": "package.app_settings.LMAP_STREAMS", "line_number": 26, "usage_type": "name"}, {"api_name": "nsl.app_settings.LMAP_REJECTED_NSLS", "line_number": 27, "usage_type": "name"}, {"api_name": "nsl.app_settings.LMAP_ALLOWED_NSLS", "line_number": 28, "usage_type": "name"}, {"api_name": "dvutils.utils.push_lmap_data_to_kinesis", "line_number": 31, "usage_type": "call"}, {"api_name": "package.app_settings.DEFAULT_NSL_KEYS", "line_number": 32, "usage_type": "name"}, {"api_name": "lmap.app_settings.LMAP_STRUCTURE_NAME", "line_number": 33, "usage_type": "name"}, {"api_name": "django.core.cache.cache.delete", "line_number": 46, "usage_type": "call"}, {"api_name": "nsl.app_settings.NSL_DELAY_CHECK_CACHE_KEY", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.core.cache.cache", "line_number": 46, "usage_type": "name"}, {"api_name": "django.core.cache.cache.delete", "line_number": 47, "usage_type": "call"}, {"api_name": "nsl.app_settings.NSL_DELAY_TIMEOUT_CACHE_KEY", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.core.cache.cache", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "319575501", "text": "import pickle\nimport matplotlib.pyplot as plt\n\nwith open(\"optimization.p\", \"rb\") as f:\n data = pickle.load(f)\n\n for index, mode in enumerate([\"car\", \"pt\", \"bike\", \"walk\"]):\n plt.plot(data[0][\"information\"][\"region_reference_shares\"][mode], color = \"C%d\" % index, linestyle = \":\")\n plt.plot(data[0][\"information\"][\"region_simulation_shares\"][mode], color = \"C%d\" % index, linestyle = \"-\")\n\nplt.show()\n", "sub_path": "example/paris/check.py", "file_name": "check.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pickle.load", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "478136470", "text": "import os.path\nimport json\nimport argparse\nimport sys\n\nfrom db import db_session, PizzaType, PizzaChoice\n\n\ndef load_json_data(filepath):\n if not os.path.exists(filepath):\n return None\n with open(filepath, 'r') as file:\n return json.load(file)\n\n\ndef parse_command_line_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--filepath',\n help='a JSON file with pizzas info for load into the database '\n '(default value: pizzas_info.json)',\n default='pizzas_info.json',\n type=str,\n )\n command_line_arguments = parser.parse_args()\n\n return command_line_arguments\n\n\ndef get_pizza_choices(pizza_choices_info):\n return [\n PizzaChoice(**pizza_choice_info)\n for pizza_choice_info in pizza_choices_info\n ]\n\n\ndef load_to_database(pizzas_info):\n for pizza_info in pizzas_info:\n pizza_type = PizzaType(\n title=pizza_info['title'],\n description=pizza_info['description'],\n )\n pizza_type.choices = get_pizza_choices(pizza_info['choices'])\n\n db_session.add(pizza_type)\n\n db_session.commit()\n\n\ndef main():\n command_line_arguments = parse_command_line_arguments()\n\n filepath = 
command_line_arguments.filepath\n\n try:\n pizzas_info = load_json_data(filepath)\n except (UnicodeDecodeError, json.JSONDecodeError):\n sys.exit('JSON file has invalid format')\n\n if pizzas_info is None:\n sys.exit('JSON file not found')\n\n load_to_database(pizzas_info)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "load_to_db.py", "file_name": "load_to_db.py", "file_ext": "py", "file_size_in_byte": 1573, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 10, "usage_type": "name"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "db.PizzaChoice", "line_number": 33, "usage_type": "call"}, {"api_name": "db.PizzaType", "line_number": 40, "usage_type": "call"}, {"api_name": "db.db_session.add", "line_number": 46, "usage_type": "call"}, {"api_name": "db.db_session", "line_number": 46, "usage_type": "name"}, {"api_name": "db.db_session.commit", "line_number": 48, "usage_type": "call"}, {"api_name": "db.db_session", "line_number": 48, "usage_type": "name"}, {"api_name": "json.JSONDecodeError", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "617498792", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Huairuo.ai.\n# Author: Lin Li (li.lin@huairuo.ai)\n#\n# cython: language_level=3\n#\nimport lib.config as cf\nimport torch\nimport torch.nn.functional as f\nfrom pytorch_transformers import *\nfrom lib.utils import load_class, gelu\nimport math\ndef test():\n import datetime\n print((datetime.datetime(2010,3, 1) - datetime.datetime(2010,2, 1)).days)\n import calendar\n print(calendar.monthrange(2010,2))\n\ndef test_expand():\n a = torch.randn(2,2)\n print(a.shape)\n print(a)\n b = a.unsqueeze(1).expand(2,3,2)\n # b = a.expand(2,2)\n print(b.shape)\n print(b)\n\ndef test_mul():\n a = torch.Tensor([1,1])\n b = torch.Tensor([[2],[2],[3]])\n print(a,b)\n print(torch.mul(a,b))\n\n\ndef test_cat():\n a = torch.randn(3,3,3)\n b = torch.randn(3,3,3)\n c = torch.cat([a,b], dim=2)\n print(c.shape)\n print(c)\n\ndef test_para():\n import math\n from torch import nn\n d_model = 3\n w = torch.empty(d_model * 3)\n lim = 1 / d_model\n nn.init.uniform_(w, -math.sqrt(lim), math.sqrt(lim))\n ww = nn.Parameter(w)\n print(ww)\n\ndef test_bert():\n from pytorch_transformers import BertTokenizer\n from lib.handler import corresponds_index, transfer_index\n tokenizer = BertTokenizer.from_pretrained(\"./data/model/bert\")\n text = \"华为nova6系列105°前置超广角双摄了解下\"\n token = tokenizer.tokenize(text)\n text = list(text)\n print(text)\n print(token)\n corr_index = corresponds_index(text, token)\n print(corr_index)\n for index, val in enumerate(text):\n index_in_token = transfer_index(text, token, index)[0]\n print(f\"index_in_token: {index_in_token}\")\n val_in_token = token[index_in_token] if index_in_token>-1 else \"\"\n print(index, val, index_in_token, val_in_token)\n # print(transfer_index(text, token, 2, 3, 9, 10))\n\n\ndef test_class():\n class A:\n def __init__(self):\n self.a=1\n self.b = 2\n\n def __getitem__(self, item):\n return item\n\n # def test(self):\n # co = self.__init__.func_code\n # 
print(co.co_name)\n\n\n # print(dir(A()))\n a = A()\n print(a(2,2))\n # print(A().test())\n\ndef te_soft_max():\n import torch.nn as nn\n\n m = nn.Softmax(dim=0)\n n = nn.Softmax(dim=1)\n k = nn.Softmax(dim=2)\n j = nn.Softmax(dim=-1)\n l = nn.Softmax(dim=-2)\n input = torch.randn(2, 2, 3)\n print(input)\n print(m(input))\n print(n(input))\n print(k(input))\n print(j(input))\n print(l(input))\n\ndef test_mask():\n a = torch.Tensor(3,3,3)\n mask = torch.Tensor([1,1,0])\n mask = mask.unsqueeze(-1)\n mask = mask.unsqueeze(-1)\n print(mask)\n print(mask.shape)\n print(a.shape)\n print(torch.mul(a, mask))\n\ndef test_init_normal():\n def linear(x,w,b): return x @ w + b\n def relu(x): return x.clamp_min(0.)\n\n x_train = torch.randn(784)\n # x_train = torch.nn.init.normal(x_train)\n print(x_train.mean(), x_train.std())\n nh = 50\n w1 = torch.randn(784, nh)\n b1 = torch.zeros(nh)\n z1 = linear(x_train, w1, b1)\n print(\"norma init=====================>\")\n print(z1.mean(), z1.std())\n\ndef test_init_xavier():\n\n print(\"Xavier init ====================>\")\n def linear(x,w,b): return x @ w + b\n def relu(x): return x.clamp_min(0.)\n\n x_train = torch.randn(784)\n # x_train = torch.nn.init.normal_(x_train)\n print(x_train.mean(), x_train.std())\n nh = 50\n w1 = torch.randn(784, nh) * math.sqrt(1/768)\n b1 = torch.zeros(nh)\n z1 = linear(x_train, w1, b1)\n scale = 1/2\n # print(0, z1.mean(), z1.std())\n z2 = relu(z1)\n print(0, z2.mean(), z2.std())\n # z2 = gelu(z1)\n # print(1, z2.mean(), z2.std())\n # for i in range(20):\n # new_chan = max(int(nh * scale), 1)\n # w2 = torch.randn(nh, new_chan) * math.sqrt(1/nh)\n # b2 = torch.zeros(new_chan)\n # z2 = linear(z1, w2, b2)\n # nh = new_chan\n # z1 = z2\n # print(i+1, z2.mean(), z2.std())\n\n\ndef test_init_kaiming():\n print(\"kaiming init ==============================>\")\n def linear(x,w,b): return x @ w + b\n def relu(x): return x.clamp_min(0.)\n\n x_train = torch.randn(784)\n # x_train = torch.nn.init.normal_(x_train)\n print(x_train.mean(), x_train.std())\n nh = 50\n w1 = torch.randn(784, nh) * math.sqrt(2 / 768)\n b1 = torch.zeros(nh)\n z1 = linear(x_train, w1, b1)\n z2 = relu(z1)\n print(z2.mean(), z2.std())\n z2 = gelu(z1)\n print(z2.mean(), z2.std())\n\ndef test_torch_buildin():\n print(\"torch init ===========>\")\n import torch.nn.init as init\n def linear(x,w,b): return x @ w + b\n def relu(x): return x.clamp_min(0.)\n t_linear = torch.nn.Linear(784, 50)\n\n nh = 50\n x_train = torch.randn(784)\n W1 = torch.zeros(784, nh)\n b1 = torch.zeros(nh)\n W2 = torch.zeros(784, nh)\n b2 = torch.zeros(1)\n\n init.kaiming_normal_(W1, mode='fan_out', nonlinearity='relu')\n init.kaiming_normal_(W2, mode='fan_out', nonlinearity='relu')\n # init.kaiming_normal_(W2, mode='fan_out')\n z1 = t_linear(x_train)\n z2 = linear(x_train, W1, b1)\n z3 = linear(x_train, W2, b1)\n # a1 = torch.relu(z1)\n a1 = relu(z1)\n a2 = relu(z2)\n a3 = relu(z3)\n print(\"a1 layer1: \", a1.mean(), a1.std())\n print(\"a2 layer1: \", a2.mean(), a2.std())\n print(\"a3 layer1: \", a3.mean(), a3.std())\n\ndef test_save_features():\n from lib.handler import get_dataset\n from my_py_toolkit.file.file_toolkit import writejson\n import lib.config as cf\n dataset = get_dataset(\"train\", cf.mode)\n writejson(dataset.convert_all_features4human_visual(), cf.path_save_feature)\n\ndef test_np():\n import numpy as np\n print(np.asarray((1,2)))\n\ndef test_loss():\n import torch.nn.functional as F\n a = torch.zeros(512)\n a[0] = 1\n print(a)\n print(F.nll_loss(1,2))\n\nif __name__ == 
\"__main__\":\n # test_save_features()\n # test_loss()\n test()\n # test_init_normal()\n # test_init_xavier()\n # test_init_kaiming()\n # test_torch_buildin()\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 5644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "datetime.datetime", "line_number": 15, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn.init.uniform_", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "pytorch_transformers.BertTokenizer.from_pretrained", "line_number": 55, "usage_type": "call"}, {"api_name": "pytorch_transformers.BertTokenizer", "line_number": 55, "usage_type": "name"}, {"api_name": "lib.handler.corresponds_index", "line_number": 61, "usage_type": "call"}, {"api_name": "lib.handler.transfer_index", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 140, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 168, "usage_type": "call"}, {"api_name": "math.sqrt", 
"line_number": 168, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 169, "usage_type": "call"}, {"api_name": "lib.utils.gelu", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 181, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 190, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 191, "usage_type": "name"}, {"api_name": "lib.handler.get_dataset", "line_number": 208, "usage_type": "call"}, {"api_name": "lib.config.mode", "line_number": 208, "usage_type": "attribute"}, {"api_name": "lib.config", "line_number": 208, "usage_type": "name"}, {"api_name": "my_py_toolkit.file.file_toolkit.writejson", "line_number": 209, "usage_type": "call"}, {"api_name": "lib.config.path_save_feature", "line_number": 209, "usage_type": "attribute"}, {"api_name": "lib.config", "line_number": 209, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 220, "usage_type": "name"}]} +{"seq_id": "240186918", "text": "from django.shortcuts import render_to_response, get_object_or_404\r\nfrom django.template import RequestContext\r\nfrom django.contrib.auth.models import User\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.views.generic.list_detail import object_list, object_detail\r\nfrom django.contrib import auth\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\n\r\nfrom profiles.models import *\r\nfrom course.views import course_page\r\nfrom course.models import *\r\nfrom actstream.models import model_stream\r\nfrom actstream import action\r\nfrom models import *\r\nfrom forms import *\r\n\r\ndef message_page(request, courseNum, courseSlug, msgNum, msg_object=Resource, typeof='resource'):\r\n\t\"\"\" Display a message \"\"\"\r\n\tmsg_info = get_object_or_404(msg_object, pk=msgNum)\r\n\tcourse_info = get_object_or_404(Course, pk=courseNum)\r\n\treturn render_to_response('message_page.html', {'msg_info': msg_info, 'typeof':typeof, 'course_info':course_info}, context_instance=RequestContext(request))\r\n\r\ndef post_message(request, courseNum, courseSlug, msg_object=Resource, typeof='resource', msg_form=ResourceForm, success_url='resource_page'):\r\n\t\"\"\" Create a new message \"\"\"\r\n\t# if the user is logged in, get there profile, and the course, \r\n\t# then check if the user is in the course\r\n\tif not request.user.is_authenticated(): return HttpResponseRedirect(reverse('registration_page'))\r\n\tuser_profile = UserProfile.objects.get(user=request.user)\r\n\tcourse = get_object_or_404(Course, pk=courseNum)\r\n\tif course not in user_profile.enrolled.all(): return HttpResponseRedirect(reverse('course_page', kwargs={'courseNum':courseNum, 
'courseSlug':courseSlug}))\t\r\n\t# if this page is not being accessed directly process the form, \r\n\t# else, return them to the course page\r\n\tif request.method == 'POST':\r\n\t\tmessage = msg_object(course=course, author=request.user)\r\n\t\tform = msg_form(request.POST, instance=message)\r\n\t\tif form.is_valid():\r\n\t\t\t# Form is valid, save the new message, redirect to the message's page\r\n\t\t\tnew_message = form.save()\r\n\t\t\treturn HttpResponseRedirect(reverse(success_url, kwargs={'courseNum':courseNum, 'courseSlug':courseSlug, 'msgNum':new_message.pk}))\r\n\telse:\r\n\t\tform = msg_form()\r\n\treturn render_to_response('create_message.html', {'course_info':course, 'form':form, 'typeof':typeof}, context_instance=RequestContext(request))\r\n\r\ndef edit_message(request, courseNum, courseSlug, msgNum, msg_object=Resource, typeof='resource', msg_form=ResourceForm, redirect_url='resource_page'):\r\n\t\"\"\" Edit a message \"\"\"\r\n\t# if the user is logged in, get there profile, and the course, \r\n\t# then check if the user is the author of the message\r\n\tif not request.user.is_authenticated(): return HttpResponseRedirect(redirect_url)\r\n\tuser_profile = UserProfile.objects.get(user=request.user)\r\n\tcourse_info = get_object_or_404(Course, pk=courseNum) \r\n\tmessage = get_object_or_404(msg_object, pk=msgNum)\r\n\tif message.author != user_profile.user: return HttpResponseRedirect(redirect_url)\r\n\t# if this page is not being accessed directly process the form, \r\n\t# else, create a fresh form to use\r\n\tif request.method == 'POST':\r\n\t\tform = msg_form(request.POST, instance=message)\r\n\t\tif form.is_valid():\r\n\t\t\tform.save()\r\n\t\t\treturn HttpResponseRedirect(reverse(redirect_url, kwargs={'courseNum':courseNum, 'courseSlug':courseSlug, 'msgNum':msgNum}))\r\n\telse:\r\n\t\tform = msg_form(instance=message)\r\n\t# display the edit page with a fresh form, or error'd form\r\n\treturn render_to_response('edit_message.html', {'msg_info':message, 'course_info':course_info, 'form':form, 'typeof':typeof}, context_instance=RequestContext(request))\r\n\r\ndef best_answer(request, courseNum, courseSlug, msgNum, commentNum):\r\n\t\"\"\" Select the best answer for a question post \"\"\"\r\n\t# check if the request is coming from the original author, \r\n\t# else redirect back to the question\r\n\tquestion = get_object_or_404(Question, pk=msgNum)\r\n\tif request.user != question.author: return HttpResponseRedirect(reverse('question_page', kwargs={'courseNum':courseNum, 'courseSlug':courseSlug, 'msgNum':msgNum}))\r\n\t# get the comments information, and set it as the best_answer\r\n\tcomment = Comment.objects.get(pk=commentNum)\r\n\tquestion.best_answer = comment\r\n\t# save, and redirect back to the question's page\r\n\tquestion.save()\r\n\treturn HttpResponseRedirect(reverse('question_page', kwargs={'courseNum':courseNum, 'courseSlug':courseSlug, 'msgNum':msgNum}))\r\n\r\n\t\r\n", "sub_path": "astrocourse/discussion/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 21, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 21, "usage_type": "call"}, 
{"api_name": "django.http.HttpResponseRedirect", "line_number": 27, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 27, "usage_type": "call"}, {"api_name": "course.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 29, "usage_type": "call"}, {"api_name": "course.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 30, "usage_type": "call"}, {"api_name": "course.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 39, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 42, "usage_type": "call"}, {"api_name": "course.views", "line_number": 42, "usage_type": "name"}, {"api_name": "django.template.RequestContext", "line_number": 42, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 51, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 59, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 63, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 69, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 70, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 76, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "285095079", "text": "# -*- coding:utf-8 -*-\n\nfrom sqlite3 import IntegrityError\n# from models import conn\nfrom models import conn\n\n\ndef executeSelectOne(sql):\n\n curs = conn.cursor()\n curs.execute(sql)\n data = curs.fetchone()\n\n return data\n\ndef executeSelectAll(sql):\n\n curs = conn.cursor()\n curs.execute(sql)\n data = curs.fetchall()\n\n return data\n\ndef executeSQL(sql):\n try:\n print('executeSQL = {}'.format(sql))\n curs = conn.cursor()\n curs.execute(sql)\n conn.commit()\n return True\n except IntegrityError:\n return False\n\n\n\n", "sub_path": "models/executeSqlite3.py", "file_name": "executeSqlite3.py", "file_ext": "py", "file_size_in_byte": 574, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "models.conn.cursor", "line_number": 10, "usage_type": "call"}, {"api_name": "models.conn", "line_number": 10, "usage_type": "name"}, {"api_name": "models.conn.cursor", "line_number": 18, "usage_type": "call"}, {"api_name": "models.conn", "line_number": 18, "usage_type": "name"}, {"api_name": "models.conn.cursor", "line_number": 27, "usage_type": "call"}, {"api_name": "models.conn", "line_number": 27, "usage_type": "name"}, {"api_name": 
"models.conn.commit", "line_number": 29, "usage_type": "call"}, {"api_name": "models.conn", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlite3.IntegrityError", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "190755390", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 15 16:49:33 2019\r\n\r\n@author: Silk\r\n\"\"\"\r\n\r\nimport db_info\r\nimport pymysql\r\nimport numpy as np\r\n\r\nclass Data():\r\n def __init__(self):\r\n myhost = db_info.config['host']\r\n myuser = db_info.config['user']\r\n mypassword = db_info.config['password']\r\n mydb = db_info.config['database']\r\n myport = db_info.config['port']\r\n self.db = pymysql.connect(host=myhost,user=myuser,password=mypassword,db=mydb,port=myport)\r\n self.cursor = self.db.cursor()\r\n \r\n self.product_map, self.reverse_map = self.getProductMapping()\r\n self.customer_map = self.getCustomerMapping()\r\n self.category_map = self.getCategoryMapping()\r\n \r\n \r\n \r\n #prepage training data from user_product_count table\r\n def getCountData(self):\r\n trainY = np.zeros([len(self.product_map), len(self.customer_map)])\r\n trainR = np.zeros([len(self.product_map), len(self.customer_map)])\r\n getCountQuery = \"SELECT * FROM rec_user_product_count\"\r\n self.cursor.execute(getCountQuery)\r\n results = self.cursor.fetchall()\r\n for row in results:\r\n customer = row[1]\r\n product = row[2]\r\n count = row[3]\r\n trainY[self.product_map[product] - 1][self.customer_map[customer] - 1] = count\r\n if count > 0:\r\n trainR[self.product_map[product] - 1][self.customer_map[customer] - 1] = 1\r\n print(\"trainning matrix load ready\")\r\n #normalize based on single customer to a 0-10 scale\r\n #base = np.amax(trainX, axis=0)\r\n #trainY = np.where(np.max(trainX, axis=0)==0, trainX, trainX*10./np.max(trainX, axis=0))\r\n #print(base)\r\n return trainY, trainR\r\n \r\n #get viewed products by customer from table report_viewed_product_index\r\n def getViewedData(self):\r\n viewed = np.zeros([len(self.product_map), len(self.customer_map)])\r\n getViewedQuery = \"\"\"SELECT a.customer_id, a.product_id FROM\r\n report_viewed_product_index AS a \r\n INNER JOIN rec_index_product_mapping AS p ON a.product_id=p.product_id\r\n INNER JOIN rec_index_customer_mapping AS c ON a.customer_id=c.customer_id\"\"\"\r\n self.cursor.execute(getViewedQuery)\r\n results = self.cursor.fetchall()\r\n for row in results:\r\n customer = row[0]\r\n product = row[1]\r\n viewed[self.product_map[product] - 1][self.customer_map[customer] - 1] = 1\r\n return viewed\r\n \r\n \r\n #prepage feature data from procat table\r\n def getFeatureData(self):\r\n trainTheta = np.zeros([len(self.product_map), len(self.category_map)])\r\n getProCatQuery = \"SELECT * FROM rec_product_category\"\r\n self.cursor.execute(getProCatQuery)\r\n results = self.cursor.fetchall()\r\n for row in results:\r\n product = row[1]\r\n category = row[2]\r\n trainTheta[self.product_map[product] - 1][self.category_map[category] - 1] = 1\r\n print(\"feature matrix load ready\")\r\n return trainTheta\r\n \r\n '''\r\n store trained product to product distance to db\r\n '''\r\n def storeProd2ProdDist(self, pMatrix):\r\n sqlInsertP2PDistance = \"INSERT INTO rec_product2product_distance (orig_product_id, recomm_product_id, distance) VALUES (%s, %s, %s)\"\r\n print(\"start\")\r\n #print(len(pMatrix))\r\n #print(len(self.reverse_map))\r\n try:\r\n for i in range(len(pMatrix)):\r\n for j in range(len(pMatrix)):\r\n x = i + 1\r\n y = j + 1\r\n if (x not in self.reverse_map or y not in 
self.reverse_map):\r\n                        continue\r\n                    else:\r\n                        if i == j:\r\n                            params = [self.reverse_map[x], self.reverse_map[y], 0]\r\n                        else:\r\n                            params = [self.reverse_map[x], self.reverse_map[y], pMatrix[i][j]]\r\n                        #print(str(self.reverse_map[i]) + \"|\" + str(pMatrix[i][j]))\r\n                        self.cursor.execute(sqlInsertP2PDistance, params)\r\n            print(\"update complete...\")\r\n            self.db.commit()\r\n        except Exception as e:\r\n            print(\"failed to update data\")\r\n            print(str(e))\r\n            self.db.rollback()\r\n\r\n\r\n    def getProductMapping(self):\r\n        product_map = {}\r\n        reverse_map = {}\r\n        productMapQuery = \"SELECT * FROM rec_index_product_mapping\"\r\n        self.cursor.execute(productMapQuery)\r\n        productMapResults = self.cursor.fetchall()\r\n        for row in productMapResults:\r\n            index = int(row[0])\r\n            product_id = int(row[1])\r\n            product_map[product_id] = index\r\n            reverse_map[index] = product_id\r\n            #print(product_map[product_id])\r\n        return product_map, reverse_map\r\n    \r\n    def getCustomerMapping(self):\r\n        customer_map = {}\r\n        customerMapQuery = \"SELECT * FROM rec_index_customer_mapping\"\r\n        self.cursor.execute(customerMapQuery)\r\n        customerMapResults = self.cursor.fetchall()\r\n        for row in customerMapResults:\r\n            index= row[0]\r\n            customer_id = row[1]\r\n            customer_map[customer_id] = index\r\n            #print(customer_map[customer_id])\r\n        return customer_map\r\n\r\n    def getCategoryMapping(self):\r\n        category_map = {}\r\n        categoryMapQuery = \"SELECT * FROM rec_index_category_mapping\"\r\n        self.cursor.execute(categoryMapQuery)\r\n        categoryMapResults = self.cursor.fetchall()\r\n        for row in categoryMapResults:\r\n            index= row[0]\r\n            category_id = row[1]\r\n            category_map[category_id] = index\r\n            #print(customer_map[customer_id])\r\n        return category_map", "sub_path": "cf_model/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 5901, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "db_info.config", "line_number": 14, "usage_type": "attribute"}, {"api_name": "db_info.config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "db_info.config", "line_number": 16, "usage_type": "attribute"}, {"api_name": "db_info.config", "line_number": 17, "usage_type": "attribute"}, {"api_name": "db_info.config", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "21440948", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, :-1].values # everything except the last column\ny = dataset.iloc[:, -1].values # only the last column\n\n# split the data into training and test sets\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2, random_state=1)#random_state=1 fixes the randomness so the split is always the same\n\n\n# feature scaling\nfrom sklearn.preprocessing import StandardScaler\nss=StandardScaler() #how many standard deviations our value contains\nX_train=ss.fit_transform(X_train)#fit and transform the training set\n# when we call fit_transform we (1) fit a model that converts the data, and then use it to transform our data\nX_test=ss.transform(X_test) # only transform here, because we have JUST fitted the transformation model, so the mean and std are ALREADY computed\n\n\n#SVM classification on the testing set\nfrom sklearn.svm import SVC\nlr=SVC(kernel='linear',random_state=0)\nlr.fit(X_train,y_train)\n\nresult=lr.predict(ss.transform([[60,65000]]))\nprint(result)\n\n\ny_pred=lr.predict(X_test)\n\nnp.set_printoptions()\nprint(np.concatenate(\n    (y_test.reshape(len(y_test), 1),\n     y_pred.reshape(len(y_pred), 1)\n     ),\n    1))\n\n\n# making confusion matrix\n# the number of correct and incorrect predictions\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n# returns a 2x2 matrix with the counts of correctly and incorrectly predicted positive answers\n# [[correctly predicted positives] [incorrectly predicted positives]\n# [incorrectly predicted negatives] [correctly predicted negatives]]\n# accuracy_score -- how many predictions are correct\ncm=confusion_matrix(y_test,y_pred)\nprint(cm)\nprint(accuracy_score(y_test,y_pred)) # returns a value from 0 to 1\n\n\n\n", "sub_path": "svm.py", "file_name": "svm.py", "file_ext": "py", "file_size_in_byte": 2531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "636572148", "text": "import socket\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport html5lib\n\nsumm = 0\npage = urllib.request.urlopen(\"http://python-data.dr-chuck.net/comments_203173.html\")\nsoup = BeautifulSoup(page, 'html5lib')\ntags = soup('span')\n\n\nfor tag in tags:\n    summ = summ + int(tag.contents[0])\n\nprint ('The sum is %s' % summ)\n\ntags = soup('a')\nfor tag in tags:\n    print (tag.get('href', None))", "sub_path": "Scrapping Data from URLs.py", "file_name": "Scrapping Data from URLs.py", "file_ext": "py", "file_size_in_byte": 395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 7, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 7, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "287330371", "text": "from rx_sharh.components.date import SyncCurrent\nfrom flask import request\n\nIP = str\nScore = float\n\nseconds_one_day = 60 * 60 * 24\n\n\nclass Dangerous:\n    _ip_list = {}\n    _ip_visit_record_time = {}\n\n    @classmethod\n    def get(cls) -> Score:\n        ip = request.remote_addr\n        return cls._ip_list.get(ip)\n\n    @classmethod\n    def refresh(cls):\n        ip = request.remote_addr\n        current = SyncCurrent.get_date()\n        if ip in cls._ip_list:\n            delta = (current - cls._ip_visit_record_time[ip]).total_seconds() / seconds_one_day\n            score = cls._ip_list[ip]\n            
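# decay the stored score by the number of days elapsed since the last visit,\n            # clamping at zero so an IP's score fades over time instead of persisting\n            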
score = 0 if delta >= score else score - delta\n cls._ip_list[ip] = score\n else:\n cls._ip_list[ip] = 0\n score = 0\n cls._ip_visit_record_time[ip] = current\n return score\n\n @classmethod\n def add(cls, plus=1):\n ip = request.remote_addr\n current = SyncCurrent.get_date()\n if ip in cls._ip_list:\n delta = (current - cls._ip_visit_record_time[ip]).total_seconds() / seconds_one_day\n score = cls._ip_list[ip] + plus\n score = 0 if delta >= score else score - delta\n cls._ip_list[ip] = score\n else:\n score = cls._ip_list[ip] = plus\n\n cls._ip_visit_record_time[ip] = current\n return score\n", "sub_path": "rx_sharh/ip_ban/dangerous.py", "file_name": "dangerous.py", "file_ext": "py", "file_size_in_byte": 1333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.request.remote_addr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.remote_addr", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "rx_sharh.components.date.SyncCurrent.get_date", "line_number": 22, "usage_type": "call"}, {"api_name": "rx_sharh.components.date.SyncCurrent", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.remote_addr", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "rx_sharh.components.date.SyncCurrent.get_date", "line_number": 37, "usage_type": "call"}, {"api_name": "rx_sharh.components.date.SyncCurrent", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "179084567", "text": "import requests\n\nport = 3000\naddr = 'timeslot'\n\n\ndef check_format(date, hour):\n\t# handle the date with no symbols (data from RiveScript)\n\tif '/' not in date:\n\t\tdate = date[:2] + '/' + date[2:4] + '/' + date[4:]\n\tif '-' not in hour:\n\t\tif hour[0] == '9':\n\t\t\thour = hour[0] + '-' + hour[1:]\n\t\telse:\n\t\t\thour = hour[:2] + '-' + hour[2:]\n\treturn date, hour\n\n\ndef get_recommand_time(dentist_name):\n\turl = \"http://{}:{}/v1/timeslots/{}/available\".format(addr, port, dentist_name)\n\tresponse = requests.get(url)\n\tdentist_available_timeslots = response.json()['messages']\n\tif not dentist_available_timeslots:\n\t\treturn False, 0, 0\n\telse:\n\t\tdate_num = len(dentist_available_timeslots)\n\t\tfrom random import randrange\n\t\tdate_index = randrange(date_num)\n\t\tdate = list(dentist_available_timeslots.keys())[date_index]\n\t\ttimeslot_num = len(dentist_available_timeslots[date])\n\t\ttimeslot_index = randrange(timeslot_num)\n\t\ttimeslot = dentist_available_timeslots[date][timeslot_index]\n\t\treturn True, date, timeslot\n\n\ndef get_dentist_available_timeslots(dentist_name):\n\tdentist_name = dentist_name.lower()\n\turl = \"http://{}:{}/v1/timeslots/{}/available\".format(addr, port, dentist_name)\n\tresponse = requests.get(url)\n\tif response.status_code == 200:\n\t\tdentist_available_timeslots = response.json()['messages']\n\t\tif dentist_available_timeslots:\n\t\t\treply = 'His/Her available times are: \\n'\n\t\t\tfor date in dentist_available_timeslots:\n\t\t\t\treply += date + ': \\n'\n\t\t\t\tfor hour in dentist_available_timeslots[date]:\n\t\t\t\t\treply += hour + ', '\n\t\t\t\treply = reply.strip(', ')\n\t\t\t\treply += '\\n'\n\t\t\treply += '\\n'\n\t\t\treply += 'Now you can book a reservation, just tell me the dentist and his/her timeslot 
:)'\n\t\t\treturn reply\n\t\telse:\n\t\t\treply = 'Oops! The dentist has no available time now.'\n\t\t\treturn reply\n\telse:\n\t\treply = 'Oops! ' + response.json()['messages']\n\t\treply += '.'\n\t\treturn reply\n\n\ndef reserve_timeslot(dentist_name, date, hour):\n\tdentist_name = dentist_name.lower()\n\tdate, hour = check_format(date, hour)\n\turl = \"http://{}:{}/v1/appointments/book\".format(addr, port)\n\tdata = {'dentist_name': dentist_name, 'date': date, 'hour': hour}\n\tresponse = requests.post(url, json=data)\n\n\tif response.status_code == 200:\n\t\tappointment_id = response.json()['messages']['app_id']\n\t\treply = 'Great! I booked an appointment for you: \\n'\n\t\tstart_hour, end_hour = hour.split('-')\n\t\treply += 'Dr. {} on {} from {} to {}, '.format(dentist_name, date, start_hour, end_hour)\n\t\treply += 'your appointment id is {}'.format(appointment_id)\n\t\treply += '.'\n\t\treturn reply\n\telse:\n\t\tmsg = response.json()['messages']\n\t\tif msg in ['The timeslot is not available', 'Invalid date', 'Invalid hour']:\n\t\t\tflag, recommend_date, recommand_timeslot = get_recommand_time(dentist_name)\n\t\t\tif flag:\n\t\t\t\treply = 'Oops! {}. '.format(msg)\n\t\t\t\treply += 'Would you like to book {} {}?'.format(recommend_date, recommand_timeslot)\n\t\t\t\treturn reply\n\t\t\telse:\n\t\t\t\treply = 'Oops! The dentist has no available time...'\n\t\t\t\treturn reply\n\t\treply = 'Oops! ' + msg\n\t\treply += '.'\n\t\treturn reply\n\n\ndef cancel_timeslot(dentist_name, date, hour):\n\tdentist_name = dentist_name.lower()\n\tdate, hour = check_format(date, hour)\n\tstart_hour, end_hour = hour.split('-')\n\tappointment_info = 'Appointment Info: Dr. {} on {} from {} to {}.'.format(dentist_name, date, start_hour, end_hour)\n\turl = \"http://{}:{}/v1/appointments/cancel\".format(addr, port)\n\tdata = {'dentist_name': dentist_name, 'date': date, 'hour': hour}\n\tresponse = requests.post(url, json=data)\n\tif response.status_code == 200:\n\t\treply = response.json()['messages']\n\t\treply += '.\\n'\n\t\treply += appointment_info\n\t\treturn reply\n\telse:\n\t\treply = 'Oops! ' + response.json()['messages']\n\t\treply += '.'\n\t\treturn reply\n\n\ndef cancel_timeslot_by_id(appointment_id):\n\tappointment_info = check_appointment(appointment_id)\n\turl = \"http://{}:{}/v1/appointments/{}/cancel\".format(addr, port, appointment_id)\n\tresponse = requests.get(url)\n\tif response.status_code == 200:\n\t\treply = response.json()['messages']\n\t\treply += '.\\n'\n\t\treply += appointment_info\n\t\treturn reply\n\telse:\n\t\treply = 'Oops! ' + response.json()['messages']\n\t\treply += '.'\n\t\treturn reply\n\n\ndef check_appointment(appointment_id):\n\turl = \"http://{}:{}/v1/appointments/{}\".format(addr, port, appointment_id)\n\tresponse = requests.get(url)\n\n\tif response.status_code == 200:\n\t\tappointment_info = response.json()['messages']\n\t\tdentist_name = appointment_info['dentist_name']\n\t\tdate = appointment_info['date']\n\t\thour = appointment_info['hour']\n\t\tstart_hour, end_hour = hour.split('-')\n\t\treply = 'Appointment Info: Dr. {} on {} from {} to {}.'.format(dentist_name, date, start_hour, end_hour)\n\t\treturn reply\n\telse:\n\t\treply = 'Oops! 
' + response.json()['messages']\n\t\treply += '.'\n\t\treturn reply\n", "sub_path": "chatbot/app/demo/v1/api/brain/timeslot.py", "file_name": "timeslot.py", "file_ext": "py", "file_size_in_byte": 4715, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 28, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 67, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 100, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 115, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "67741906", "text": "import os\nimport json\nimport torch\nimport argparse\nimport numpy as np\nfrom multiprocessing import cpu_count\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader\nfrom collections import OrderedDict, defaultdict\nfrom tqdm import tqdm\n\nfrom dataset import SentenceDataset, collate_fn\nfrom model import VAE\nfrom loss import MaskedCrossEntropyLoss, KLLoss\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef kl_anneal_function(anneal_function, step, k, x0):\n    if anneal_function == 'logistic':\n        return float(1/(1+np.exp(-k*(step-x0))))\n    elif anneal_function == 'linear':\n        return min(1, step/x0)\n\n\ndef main(args):\n    train_dataset = SentenceDataset(os.path.join(args.data_dir, 'train.txt'), args.vocab_file, args.max_sequence_length)\n    valid_dataset = SentenceDataset(os.path.join(args.data_dir, 'valid.txt'), args.vocab_file, args.max_sequence_length)\n    train_loader = DataLoader(\n        dataset=train_dataset,\n        batch_size=args.batch_size,\n        shuffle=True,\n        collate_fn=collate_fn,\n        num_workers=4,\n        drop_last=True,\n        pin_memory=torch.cuda.is_available())\n    valid_loader = DataLoader(\n        dataset=valid_dataset,\n        batch_size=args.batch_size,\n        shuffle=False,\n        collate_fn=collate_fn,\n        num_workers=4,\n        drop_last=True,\n        pin_memory=torch.cuda.is_available())\n\n    sos_idx = train_dataset.vocab['w2i']['<sos>']\n    eos_idx = train_dataset.vocab['w2i']['<eos>']\n    pad_idx = train_dataset.vocab['w2i']['<pad>']\n    unk_idx = train_dataset.vocab['w2i']['<unk>']\n    model = VAE(\n        rnn_type=args.rnn_type,\n        num_embeddings=train_dataset.vocab_size,\n        dim_embedding=args.dim_embedding,\n        dim_hidden=args.dim_hidden, \n        num_layers=args.num_layers,\n        bidirectional=args.bidirectional, \n        dim_latent=args.dim_latent, \n        word_dropout=args.word_dropout,\n        dropout=args.dropout,\n        sos_idx=sos_idx,\n        eos_idx=eos_idx,\n        pad_idx=pad_idx,\n        unk_idx=unk_idx,\n        max_sequence_length=args.max_sequence_length).to(device)\n\n    print(model)\n\n    re_criterion = MaskedCrossEntropyLoss()\n    kl_criterion = KLLoss()\n\n    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n\n    step = 0\n    for epoch in range(args.epochs):\n        model.train()\n        tracker = {'ELBO': []}\n        pbar = tqdm(train_loader)\n        pbar.set_description('Epoch %3d / %d' % (epoch + 1, args.epochs))\n        for iteration, (input, target, length) in enumerate(pbar):\n            input = input.to(device)\n            target = target.to(device)\n            length = length.to(device)\n\n            optimizer.zero_grad()\n            output, mean, logvar, z = model(input, length, True)\n\n            re_loss = re_criterion(output, target, target!=pad_idx)\n            kl_loss = kl_criterion(mean, logvar)\n            kl_weight = 
kl_anneal_function(args.anneal_function, step, args.k, args.x0)\n\n loss = re_loss + kl_weight * kl_loss\n elbo = re_loss + kl_loss\n\n loss.backward()\n optimizer.step()\n step+=1\n\n # bookkeepeing\n tracker['ELBO'].append(elbo.item())\n\n if iteration % args.print_every == 0 or iteration+1 == len(train_loader):\n pbar.set_postfix(loss=elbo.item(), re_loss=re_loss.item(), kl_loss=kl_loss.item(), kl_weight=kl_weight)\n\n print(\"Train Epoch %3d/%d, Mean ELBO %9.4f\"%(epoch+1, args.epochs, sum(tracker['ELBO']) / len(tracker['ELBO'])))\n\n model.eval()\n tracker = {'ELBO': []}\n with torch.no_grad():\n for input, target, length in valid_loader:\n input = input.to(device)\n target = target.to(device)\n length = length.to(device)\n\n output, mean, logvar, z = model(input, length)\n\n re_loss = re_criterion(output, target, target!=pad_idx)\n kl_loss = kl_criterion(mean, logvar)\n\n elbo = re_loss + kl_loss\n\n # bookkeepeing\n tracker['ELBO'].append(elbo.item())\n\n print(\"Valid Epoch %3d/%d, Mean ELBO %9.4f\"%(epoch+1, args.epochs, sum(tracker['ELBO']) / len(tracker['ELBO'])))\n\n checkpoint_path = os.path.join(args.save_path, \"model-%d.pth\" % (epoch+1))\n torch.save(model.state_dict(), checkpoint_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--data_dir', type=str, default='data')\n parser.add_argument('--vocab_file', type=str, default='data/vocab.json')\n parser.add_argument('--max_sequence_length', type=int, default=50)\n\n # model settings\n parser.add_argument('-dl', '--dim_latent', type=int, default=64)\n parser.add_argument('-nl', '--num_layers', type=int, default=1)\n parser.add_argument('-wd', '--word_dropout', type=float, default=0.75)\n parser.add_argument('-do', '--dropout', type=float, default=0.5)\n\n # rnn settings\n parser.add_argument('-de', '--dim_embedding', type=int, default=300)\n parser.add_argument('-rnn', '--rnn_type', type=str, default='gru')\n parser.add_argument('-dh', '--dim_hidden', type=int, default=256)\n parser.add_argument('-bi', '--bidirectional', action='store_true')\n\n # training settings\n parser.add_argument('-ep', '--epochs', type=int, default=30)\n parser.add_argument('-bs', '--batch_size', type=int, default=64)\n parser.add_argument('-lr', '--learning_rate', type=float, default=0.0005)\n parser.add_argument('-af', '--anneal_function', type=str, default='logistic')\n parser.add_argument('-k', '--k', type=float, default=0.0025)\n parser.add_argument('-x0', '--x0', type=int, default=2500)\n\n parser.add_argument('-v','--print_every', type=int, default=50)\n parser.add_argument('-save','--save_path', type=str, default='save')\n\n args = parser.parse_args()\n\n main(args)\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5943, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "dataset.SentenceDataset", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "dataset.SentenceDataset", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, 
"usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 29, "usage_type": "call"}, {"api_name": "dataset.collate_fn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "dataset.collate_fn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 44, "usage_type": "attribute"}, {"api_name": "model.VAE", "line_number": 50, "usage_type": "call"}, {"api_name": "loss.MaskedCrossEntropyLoss", "line_number": 68, "usage_type": "call"}, {"api_name": "loss.KLLoss", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 71, "usage_type": "attribute"}, {"api_name": "model.parameters", "line_number": 71, "usage_type": "call"}, {"api_name": "model.train", "line_number": 75, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 77, "usage_type": "call"}, {"api_name": "loss.backward", "line_number": 94, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 127, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 127, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "99323056", "text": "from twisted.internet import defer\nfrom twisted.trial.unittest import TestCase\nfrom thinserve.api.referenceable import Referenceable\nfrom thinserve.api.remerr import RemoteError\nfrom thinserve.proto import session\nfrom thinserve.proto.lazyparser import LazyParser\nfrom thinserve.tests.testutil import check_lists_equal\n\n\nclass SessionTests (TestCase):\n def setUp(self):\n\n self._eaf_info = None\n\n @Referenceable\n class C (object):\n @Referenceable.Method\n def eat_a_fruit(s, fruit):\n realfruit = fruit.parse_type(str)\n\n # Note: self is outer self (a SessionTests instance):\n self.assertIsNotNone(self._eaf_info)\n (param, ret) = self._eaf_info\n self._eaf_info = None\n\n self.assertEqual(param, realfruit)\n return ret\n\n @Referenceable.Method\n def throw_a_fruit(s, fruit):\n fruit.parse_type(int)\n\n self.root = C()\n self.s = session.Session(self.root)\n\n # Setup some immediate call/reply info:\n self.replies = []\n self.params = []\n\n setupinfo = [('Yum {}!', self.replies),\n ('Fruit #{}', self.params)]\n for i in range(17):\n for tmpl, container in setupinfo:\n container.append(tmpl.format(i))\n\n def test_empty_gather_outgoing_messages(self):\n d = self.s.gather_outgoing_messages()\n\n # The deferred should be waiting to fire:\n self.failIf(d.called)\n\n # Do not return d, which will never fire.\n\n def test_receive_n_immediate_calls_then_gather_n_data_replies(self):\n return self._receive_n_calls_check_replies(\n 'eat_a_fruit',\n [\n ['reply',\n {'id': callid,\n 'result': ['data', reply]}]\n\n for callid, reply\n in enumerate(self.replies)\n ])\n\n def test_receive_n_immediate_calls_then_gather_n_error_replies(self):\n tmpl = 
'unexpected type {actual}, expecting {expected}'\n\n return self._receive_n_calls_check_replies(\n 'throw_a_fruit',\n [\n ['reply',\n {'id': i,\n 'result': ['error',\n {'template': tmpl,\n 'params': {'expected': 'int',\n 'actual': 'str'},\n 'path': '/call.method/throw_a_fruit.fruit',\n 'message': 'Fruit #{}'.format(i)}]}]\n\n for i\n in range(len(self.params))\n ])\n\n def _receive_n_calls_check_replies(self, methodname, expectedreplies):\n\n callpairs = zip(self.params, self.replies)\n\n for callid, (param, reply) in enumerate(callpairs):\n self._eaf_info = (param, reply)\n self.s.receive_message(\n LazyParser(\n ['call',\n {'id': callid,\n 'target': None,\n 'method': [methodname, {'fruit': param}]}]))\n\n d = self.s.gather_outgoing_messages()\n\n # The deferred is called with a reply:\n self.failUnless(d.called)\n d.addCallback(\n lambda msgs: check_lists_equal(self, expectedreplies, msgs))\n return d\n\n def test_gather_n_calls_then_receive_n_data_replies(self):\n return self._gather_n_calls_then_receive_replies(\n 'eat_a_fruit',\n make_result=lambda reply: ['data', reply],\n check_result=lambda res, reply: self.assertIs(reply, res.unwrap()),\n check_err=None)\n\n def test_gather_n_calls_then_receive_n_error_replies(self):\n return self._gather_n_calls_then_receive_replies(\n 'eat_a_fruit',\n make_result=lambda reply: ['error', reply],\n check_result=lambda _x, _y: self.fail('Unexpected'),\n check_err=lambda f: self.assertIsInstance(f.value, RemoteError))\n\n def _gather_n_calls_then_receive_replies(self,\n methodname,\n make_result,\n check_result,\n check_err):\n fakeid = 'fake-client-id'\n\n repdefs = []\n\n # Prime the (private) outgoing call queue:\n for param in self.params:\n repdefs.append(\n self.s._send_call(\n target=fakeid,\n method=methodname,\n params={'fruit': param}))\n\n d = self.s.gather_outgoing_messages()\n\n self.failUnless(d.called)\n\n @d.addCallback\n def take_messages(messages):\n check_lists_equal(\n self,\n [\n ['call',\n {'id': callid,\n 'target': fakeid,\n 'method': [methodname, {'fruit': param}]}]\n\n for callid, param\n in enumerate(self.params)\n ],\n messages)\n\n for callid, (reply, d) in enumerate(zip(self.replies, repdefs)):\n self.failIf(d.called)\n\n self.s.receive_message(\n LazyParser(\n ['reply',\n {'id': callid,\n 'result': make_result(reply)}]))\n\n self.failUnless(d.called)\n d.addCallbacks(check_result, check_err, callbackArgs=(reply,))\n\n d.addCallback(lambda _: defer.DeferredList(repdefs))\n return d\n", "sub_path": "thinserve/tests/proto/test_session.py", "file_name": "test_session.py", "file_ext": "py", "file_size_in_byte": 5691, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "twisted.trial.unittest.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "thinserve.api.referenceable.Referenceable.Method", "line_number": 17, "usage_type": "attribute"}, {"api_name": "thinserve.api.referenceable.Referenceable", "line_number": 17, "usage_type": "name"}, {"api_name": "thinserve.api.referenceable.Referenceable.Method", "line_number": 29, "usage_type": "attribute"}, {"api_name": "thinserve.api.referenceable.Referenceable", "line_number": 29, "usage_type": "name"}, {"api_name": "thinserve.api.referenceable.Referenceable", "line_number": 15, "usage_type": "name"}, {"api_name": "thinserve.proto.session.Session", "line_number": 34, "usage_type": "call"}, {"api_name": "thinserve.proto.session", "line_number": 34, "usage_type": "name"}, {"api_name": 
"thinserve.proto.lazyparser.LazyParser", "line_number": 92, "usage_type": "call"}, {"api_name": "thinserve.tests.testutil.check_lists_equal", "line_number": 103, "usage_type": "call"}, {"api_name": "thinserve.api.remerr.RemoteError", "line_number": 118, "usage_type": "argument"}, {"api_name": "thinserve.tests.testutil.check_lists_equal", "line_number": 143, "usage_type": "call"}, {"api_name": "thinserve.proto.lazyparser.LazyParser", "line_number": 160, "usage_type": "call"}, {"api_name": "twisted.internet.defer.DeferredList", "line_number": 168, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 168, "usage_type": "name"}]} +{"seq_id": "262484950", "text": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Provides utilities to preprocess images.\nThis is similar to VGG preprocessing except it applies a retina filter on the resized images at a randomly selected\nfoveal point around the center. The foveation ratio is held constant. This implementation uses a flag to set the maximum\nview of the retinal transformation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import control_flow_ops\nfrom absl import flags\nimport define_flags\nfrom retina.retina_tf import warp_image\n\n\nslim = tf.contrib.slim\nFLAGS = flags.FLAGS\n# FLAGS = tf.app.flags.FLAGS\n\n\n_RESIZE_SIDE_MIN = 350\n_RESIZE_SIDE_MAX = 512\n\n_RESIZE_EVAL = 299\n\n\ndef _crop(image, offset_height, offset_width, crop_height, crop_width):\n \"\"\"Crops the given image using the provided offsets and sizes.\n Note that the method doesn't assume we know the input image size but it does\n assume we know the input image rank.\n Args:\n image: an image of shape [height, width, channels].\n offset_height: a scalar tensor indicating the height offset.\n offset_width: a scalar tensor indicating the width offset.\n crop_height: the height of the cropped image.\n crop_width: the width of the cropped image.\n Returns:\n the cropped (and resized) image.\n Raises:\n InvalidArgumentError: if the rank is not 3 or if the image dimensions are\n less than the crop size.\n \"\"\"\n original_shape = image.shape\n\n rank_assertion = tf.Assert(\n tf.equal(tf.rank(image), 3),\n ['Rank of image must be equal to 3.'])\n cropped_shape = control_flow_ops.with_dependencies(\n [rank_assertion],\n tf.stack([crop_height, crop_width, original_shape[2]]))\n\n size_assertion = tf.Assert(\n tf.logical_and(\n tf.greater_equal(original_shape[0], crop_height),\n tf.greater_equal(original_shape[1], crop_width)),\n ['Crop size greater than the image size.'])\n\n offsets = tf.to_float(tf.stack([offset_height, offset_width]))\n\n # Use tf.slice instead of crop_to_bounding box as it accepts tensors to\n # define the crop size.\n image = control_flow_ops.with_dependencies(\n 
[size_assertion],\n warp_image(image, output_size=crop_height, shift=offsets))\n return tf.reshape(image, cropped_shape)\n\n\ndef _random_crop(image_list, crop_height, crop_width):\n \"\"\"Crops the given list of images.\n The function applies the same crop to each image in the list. This can be\n effectively applied when there are multiple image inputs of the same\n dimension such as:\n image, depths, normals = _random_crop([image, depths, normals], 120, 150)\n Args:\n image_list: a list of image tensors of the same dimension but possibly\n varying channel.\n crop_height: the new height.\n crop_width: the new width.\n Returns:\n the image_list with cropped images.\n Raises:\n ValueError: if there are multiple image inputs provided with different size\n or the images are smaller than the crop dimensions.\n \"\"\"\n if not image_list:\n raise ValueError('Empty image_list.')\n\n # Compute the rank assertions.\n rank_assertions = []\n for i in range(len(image_list)):\n image_rank = tf.rank(image_list[i])\n rank_assert = tf.Assert(\n tf.equal(image_rank, 3),\n ['Wrong rank for tensor %s [expected] [actual]',\n image_list[i].name, 3, image_rank])\n rank_assertions.append(rank_assert)\n\n image_shape = control_flow_ops.with_dependencies(\n [rank_assertions[0]],\n tf.shape(image_list[0]))\n image_height = image_shape[0]\n image_width = image_shape[1]\n crop_size_assert = tf.Assert(\n tf.logical_and(\n tf.greater_equal(image_height, crop_height),\n tf.greater_equal(image_width, crop_width)),\n ['Crop size greater than the image size.'])\n\n asserts = [rank_assertions[0], crop_size_assert]\n\n for i in range(1, len(image_list)):\n image = image_list[i]\n asserts.append(rank_assertions[i])\n shape = control_flow_ops.with_dependencies([rank_assertions[i]],\n tf.shape(image))\n height = shape[0]\n width = shape[1]\n\n height_assert = tf.Assert(\n tf.equal(height, image_height),\n ['Wrong height for tensor %s [expected][actual]',\n image.name, height, image_height])\n width_assert = tf.Assert(\n tf.equal(width, image_width),\n ['Wrong width for tensor %s [expected][actual]',\n image.name, width, image_width])\n asserts.extend([height_assert, width_assert])\n\n # Create a random bounding box.\n #\n # Use tf.random_uniform and not numpy.random.rand as doing the former would\n # generate random numbers at graph eval time, unlike the latter which\n # generates random numbers at graph definition time.\n max_offset_height = control_flow_ops.with_dependencies(\n asserts, tf.cast((image_height - crop_height) / 2, tf.float32))\n max_offset_width = control_flow_ops.with_dependencies(\n asserts, tf.cast((image_width - crop_width) / 2, tf.float32))\n offset_height = tf.random_uniform(\n [], minval=-max_offset_height, maxval=max_offset_height, dtype=tf.float32)\n offset_width = tf.random_uniform(\n [], minval=-max_offset_width, maxval=max_offset_width, dtype=tf.float32)\n\n return [_crop(image, offset_height, offset_width,\n crop_height, crop_width) for image in image_list]\n\n\ndef _central_crop(image_list, crop_height, crop_width):\n \"\"\"Performs central crops of the given image list.\n Args:\n image_list: a list of image tensors of the same dimension but possibly\n varying channel.\n crop_height: the height of the image following the crop.\n crop_width: the width of the image following the crop.\n Returns:\n the list of cropped images.\n \"\"\"\n outputs = []\n for image in image_list:\n # image_height = tf.shape(image)[0]\n # image_width = tf.shape(image)[1]\n\n # offset_height = (image_height - crop_height) 
/ 2\n    # offset_width = (image_width - crop_width) / 2\n\n    outputs.append(_crop(image, tf.constant([0], tf.float32), tf.constant([0], tf.float32),\n                         crop_height, crop_width))\n  return outputs\n\n\ndef _mean_image_subtraction(image, means):\n  \"\"\"Subtracts the given means from each image channel.\n  For example:\n    means = [123.68, 116.779, 103.939]\n    image = _mean_image_subtraction(image, means)\n  Note that the rank of `image` must be known.\n  Args:\n    image: a tensor of size [height, width, C].\n    means: a C-vector of values to subtract from each channel.\n  Returns:\n    the centered image.\n  Raises:\n    ValueError: If the rank of `image` is unknown, if `image` has a rank other\n      than three or if the number of channels in `image` doesn't match the\n      number of values in `means`.\n  \"\"\"\n  if image.get_shape().ndims != 3:\n    raise ValueError('Input must be of size [height, width, C>0]')\n  num_channels = image.get_shape().as_list()[-1]\n  if len(means) != num_channels:\n    raise ValueError('len(means) must match the number of channels')\n\n  channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)\n  for i in range(num_channels):\n    channels[i] -= means[i]\n  return tf.concat(axis=2, values=channels)\n\n\ndef _smallest_size_at_least(height, width, smallest_side):\n  \"\"\"Computes new shape with the smallest side equal to `smallest_side`.\n  Computes new shape with the smallest side equal to `smallest_side` while\n  preserving the original aspect ratio.\n  Args:\n    height: an int32 scalar tensor indicating the current height.\n    width: an int32 scalar tensor indicating the current width.\n    smallest_side: A python integer or scalar `Tensor` indicating the size of\n      the smallest side after resize.\n  Returns:\n    new_height: an int32 scalar tensor indicating the new height.\n    new_width: an int32 scalar tensor indicating the new width.\n  \"\"\"\n  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)\n\n  height = tf.to_float(height)\n  width = tf.to_float(width)\n  smallest_side = tf.to_float(smallest_side)\n\n  scale = tf.cond(tf.greater(height, width),\n                  lambda: smallest_side / width,\n                  lambda: smallest_side / height)\n  new_height = tf.to_int32(height * scale)\n  new_width = tf.to_int32(width * scale)\n  return new_height, new_width\n\n\n
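# A quick plain-Python check of the `_smallest_size_at_least` arithmetic above
# (hypothetical dimensions, no TensorFlow needed): with height=400, width=600
# and smallest_side=350, height is the smaller side, so scale = 350 / 400 and
# the aspect ratio is preserved.
def smallest_size_at_least(height, width, smallest_side):
    # Mirror of the tf.cond above: scale the *smaller* side up to smallest_side.
    scale = smallest_side / width if height > width else smallest_side / height
    return int(height * scale), int(width * scale)

assert smallest_size_at_least(400, 600, 350) == (350, 525)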
def _aspect_preserving_resize(image, smallest_side):\n  \"\"\"Resize images preserving the original aspect ratio.\n  Args:\n    image: A 3-D image `Tensor`.\n    smallest_side: A python integer or scalar `Tensor` indicating the size of\n      the smallest side after resize.\n  Returns:\n    resized_image: A 3-D tensor containing the resized image.\n  \"\"\"\n  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)\n\n  shape = tf.shape(image)\n  height = shape[0]\n  width = shape[1]\n  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)\n  image = tf.expand_dims(image, 0)\n  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],\n                                           align_corners=False)\n  resized_image = tf.squeeze(resized_image)\n  resized_image.set_shape([None, None, 3])\n  return resized_image\n\n\ndef inputs(dataset, batch_size=None, num_preprocess_threads=None):\n  \"\"\"Generate batches of ImageNet images for evaluation.\n\n  Use this function as the inputs for evaluating a network.\n\n  Note that some (minimal) image preprocessing occurs during evaluation\n  including central cropping and resizing of the image to fit the network.\n\n  Args:\n    dataset: instance of Dataset class specifying the dataset.\n    batch_size: integer, number of examples in batch\n    num_preprocess_threads: integer, total number of preprocessing threads;\n      if None, defaults to FLAGS.num_preprocess_threads.\n\n  Returns:\n    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,\n                                       FLAGS.image_size, 3].\n    labels: 1-D integer Tensor of [batch_size].\n  \"\"\"\n  if not batch_size:\n    batch_size = FLAGS.batch_size\n\n  # Force all input processing onto CPU in order to reserve the GPU for\n  # the forward inference and back-propagation.\n  with tf.device('/cpu:0'):\n    images, labels = batch_inputs(\n        dataset, batch_size, train=False,\n        num_preprocess_threads=num_preprocess_threads,\n        num_readers=1)\n\n  return images, labels\n\n\ndef distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):\n  \"\"\"Generate batches of distorted versions of ImageNet images.\n\n  Use this function as the inputs for training a network.\n\n  Distorting images provides a useful technique for augmenting the data\n  set during training in order to make the network invariant to aspects\n  of the image that do not affect the label.\n\n  Args:\n    dataset: instance of Dataset class specifying the dataset.\n    batch_size: integer, number of examples in batch\n    num_preprocess_threads: integer, total number of preprocessing threads;\n      if None, defaults to FLAGS.num_preprocess_threads.\n\n  Returns:\n    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,\n                                       FLAGS.image_size, 3].\n    labels: 1-D integer Tensor of [batch_size].\n  \"\"\"\n  if not batch_size:\n    batch_size = FLAGS.batch_size\n\n  # Force all input processing onto CPU in order to reserve the GPU for\n  # the forward inference and back-propagation.\n  with tf.device('/cpu:0'):\n    images, labels = batch_inputs(\n        dataset, batch_size, train=True,\n        num_preprocess_threads=num_preprocess_threads,\n        num_readers=FLAGS.num_readers)\n  return images, labels\n\n\ndef decode_jpeg(image_buffer, scope=None):\n  \"\"\"Decode a JPEG string into one 3-D float image Tensor.\n\n  Args:\n    image_buffer: scalar string Tensor.\n    scope: Optional scope for op_scope.\n  Returns:\n    3-D float Tensor with values ranging from [0, 1).\n  \"\"\"\n  with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):\n    # Decode the string as an RGB JPEG.\n    # Note that the resulting image contains an unknown height and width\n    # that is set dynamically by decode_jpeg. In other words, the height\n    # and width of image is unknown at compile-time.\n    image = tf.image.decode_jpeg(image_buffer, channels=3)\n\n    # After this point, all image pixels reside in [0,1)\n    # until the very end, when they're rescaled to (-1, 1). The various\n    # adjust_* ops all require this range for dtype float.
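# A minimal NumPy sketch (hypothetical pixel values) of that [0, 1) -> (-1, 1)
# rescale; image_preprocessing below applies the same affine map with
# tf.subtract(image, 0.5) followed by tf.multiply(image, 2.0).
import numpy as np

pixels = np.array([0.0, 0.25, 0.5, 0.75])   # decoded JPEG values in [0, 1)
rescaled = (pixels - 0.5) * 2.0             # -> [-1.0, -0.5, 0.0, 0.5]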
\n    image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n    return image\n\n\ndef preprocess_for_train(image_buffer,\n                         output_height,\n                         output_width,\n                         resize_side_min=_RESIZE_SIDE_MIN,\n                         resize_side_max=_RESIZE_SIDE_MAX):\n  \"\"\"Preprocesses the given image for training.\n  Note that the actual resizing scale is sampled from\n    [`resize_side_min`, `resize_side_max`].\n  Args:\n    image_buffer: scalar string `Tensor` containing a JPEG-encoded image.\n    output_height: The height of the image after preprocessing.\n    output_width: The width of the image after preprocessing.\n    resize_side_min: The lower bound for the smallest side of the image for\n      aspect-preserving resizing.\n    resize_side_max: The upper bound for the smallest side of the image for\n      aspect-preserving resizing.\n  Returns:\n    A preprocessed image.\n  \"\"\"\n  image = decode_jpeg(image_buffer)\n  resize_side = tf.random_uniform(\n      [], minval=resize_side_min, maxval=resize_side_max + 1, dtype=tf.int32)\n\n  image = _aspect_preserving_resize(image, resize_side)\n  image = _random_crop([image], output_height, output_width)[0]\n  image.set_shape([output_height, output_width, 3])\n  image = tf.to_float(image)\n  image = tf.image.random_flip_left_right(image)\n  return image\n\n\ndef preprocess_for_eval(image_buffer, output_height, output_width, resize_side):\n  \"\"\"Preprocesses the given image for evaluation.\n  Args:\n    image_buffer: scalar string `Tensor` containing a JPEG-encoded image.\n    output_height: The height of the image after preprocessing.\n    output_width: The width of the image after preprocessing.\n    resize_side: The smallest side of the image for aspect-preserving resizing.\n  Returns:\n    A preprocessed image.\n  \"\"\"\n  image = decode_jpeg(image_buffer)\n  image = _aspect_preserving_resize(image, resize_side)\n  image = _central_crop([image], output_height, output_width)[0]\n  image.set_shape([output_height, output_width, 3])\n  image = tf.to_float(image)\n  return image\n\n\ndef image_preprocessing(image, output_height, output_width, is_training=False,\n                        resize_side_min=_RESIZE_SIDE_MIN,\n                        resize_side_max=_RESIZE_SIDE_MAX):\n  \"\"\"Preprocesses the given image.\n  Args:\n    image: scalar string `Tensor` containing a JPEG-encoded image.\n    output_height: The height of the image after preprocessing.\n    output_width: The width of the image after preprocessing.\n    is_training: `True` if we're preprocessing the image for training and\n      `False` otherwise.\n    resize_side_min: The lower bound for the smallest side of the image for\n      aspect-preserving resizing. If `is_training` is `False`, then this value\n      is used for rescaling.\n    resize_side_max: The upper bound for the smallest side of the image for\n      aspect-preserving resizing. If `is_training` is `False`, this value is\n      ignored. Otherwise, the resize side is sampled from\n        [resize_side_min, resize_side_max].\n  Returns:\n    A preprocessed image.\n  \"\"\"\n  if is_training:\n    image = preprocess_for_train(image, output_height, output_width,\n                                 resize_side_min, resize_side_max)\n  else:\n    image = preprocess_for_eval(image, output_height, output_width,\n                                _RESIZE_EVAL)\n  image = tf.subtract(image, 0.5)\n  image = tf.multiply(image, 2.0)\n  return image\n\n\ndef parse_example_proto(example_serialized):\n  \"\"\"Parses an Example proto containing a training example of an image.\n\n  The output of the build_image_data.py image preprocessing script is a dataset\n  containing serialized Example protocol buffers. 
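# As a rough illustration (hypothetical values; only a few of the fields
# listed below), one such serialized Example could be produced like this:
import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'image/encoded': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[b'<JPEG data>'])),
    'image/class/label': tf.train.Feature(
        int64_list=tf.train.Int64List(value=[615])),
    'image/class/text': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[b'knee pad'])),
}))
example_serialized = example.SerializeToString()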
Each Example proto contains\n  the following fields:\n\n    image/height: 462\n    image/width: 581\n    image/colorspace: 'RGB'\n    image/channels: 3\n    image/class/label: 615\n    image/class/synset: 'n03623198'\n    image/class/text: 'knee pad'\n    image/object/bbox/xmin: 0.1\n    image/object/bbox/xmax: 0.9\n    image/object/bbox/ymin: 0.2\n    image/object/bbox/ymax: 0.6\n    image/object/bbox/label: 615\n    image/format: 'JPEG'\n    image/filename: 'ILSVRC2012_val_00041207.JPEG'\n    image/encoded: <JPEG encoded string>\n\n  Args:\n    example_serialized: scalar Tensor tf.string containing a serialized\n      Example protocol buffer.\n\n  Returns:\n    image_buffer: Tensor tf.string containing the contents of a JPEG file.\n    label: Tensor tf.int32 containing the label.\n    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n      where each coordinate is [0, 1) and the coordinates are arranged as\n      [ymin, xmin, ymax, xmax].\n    text: Tensor tf.string containing the human-readable label.\n  \"\"\"\n  # Dense features in Example proto.\n  feature_map = {\n      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,\n                                          default_value=''),\n      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,\n                                              default_value=-1),\n      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,\n                                             default_value=''),\n  }\n  sparse_float32 = tf.VarLenFeature(dtype=tf.float32)\n  # Sparse features in Example proto.\n  feature_map.update(\n      {k: sparse_float32 for k in ['image/object/bbox/xmin',\n                                   'image/object/bbox/ymin',\n                                   'image/object/bbox/xmax',\n                                   'image/object/bbox/ymax']})\n\n  features = tf.parse_single_example(example_serialized, feature_map)\n  label = tf.cast(features['image/class/label'], dtype=tf.int32)\n\n  xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)\n  ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)\n  xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)\n  ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)\n\n  # Note that we impose an ordering of (y, x) just to make life difficult.\n  bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n\n  # Force the variable number of bounding boxes into the shape\n  # [1, num_boxes, coords].\n  bbox = tf.expand_dims(bbox, 0)\n  bbox = tf.transpose(bbox, [0, 2, 1])\n\n  return features['image/encoded'], label, bbox, features['image/class/text']\n\n\ndef batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,\n                 num_readers=1):\n  \"\"\"Construct batches of training or evaluation examples from the image dataset.\n\n  Args:\n    dataset: instance of Dataset class specifying the dataset.\n      See dataset.py for details.\n    batch_size: integer\n    train: boolean\n    num_preprocess_threads: integer, total number of preprocessing threads\n    num_readers: integer, number of parallel readers\n\n  Returns:\n    images: 4-D float Tensor of a batch of images\n    labels: 1-D integer Tensor of [batch_size].\n\n  Raises:\n    ValueError: if data is not found\n  \"\"\"\n  with tf.name_scope('batch_processing'):\n    data_files = dataset.data_files()\n    if data_files is None:\n      raise ValueError('No data files found for this dataset')\n\n    # Create filename_queue\n    if train:\n      filename_queue = tf.train.string_input_producer(data_files,\n                                                      shuffle=True,\n                                                      capacity=16)\n    else:\n      filename_queue = tf.train.string_input_producer(data_files,\n                                                      shuffle=False,\n                                                      capacity=1)\n    if num_preprocess_threads is None:\n      num_preprocess_threads = FLAGS.num_preprocess_threads\n\n    if num_preprocess_threads % 4:\n      raise ValueError('Please make num_preprocess_threads a multiple '\n                       'of 4 (%d %% 4 != 0).' % 
num_preprocess_threads)\n\n if num_readers is None:\n num_readers = FLAGS.num_readers\n\n if num_readers < 1:\n raise ValueError('Please make num_readers at least 1')\n\n # Approximate number of examples per shard.\n examples_per_shard = 1024\n # Size the random shuffle queue to balance between good global\n # mixing (more examples) and memory use (fewer examples).\n # 1 image uses 299*299*3*4 bytes = 1MB\n # The default input_queue_memory_factor is 16 implying a shuffling queue\n # size: examples_per_shard * 16 * 1MB = 17.6GB\n min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor\n if train:\n examples_queue = tf.RandomShuffleQueue(\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples,\n dtypes=[tf.string])\n else:\n examples_queue = tf.FIFOQueue(\n capacity=examples_per_shard + 3 * batch_size,\n dtypes=[tf.string])\n\n # Create multiple readers to populate the queue of examples.\n if num_readers > 1:\n enqueue_ops = []\n for _ in range(num_readers):\n reader = dataset.reader()\n _, value = reader.read(filename_queue)\n enqueue_ops.append(examples_queue.enqueue([value]))\n\n tf.train.queue_runner.add_queue_runner(\n tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))\n example_serialized = examples_queue.dequeue()\n else:\n reader = dataset.reader()\n _, example_serialized = reader.read(filename_queue)\n\n images_and_labels = []\n for thread_id in range(num_preprocess_threads):\n # Parse a serialized Example proto to extract the image and metadata.\n image_buffer, label_index, bbox, _ = parse_example_proto(\n example_serialized)\n image = image_preprocessing(image_buffer,\n output_height=FLAGS.image_size,\n output_width=FLAGS.image_size,\n is_training=train)\n images_and_labels.append([image, label_index])\n\n images, label_index_batch = tf.train.batch_join(\n images_and_labels,\n batch_size=batch_size,\n capacity=2 * num_preprocess_threads * batch_size)\n\n # Reshape images into these desired dimensions.\n height = FLAGS.image_size\n width = FLAGS.image_size\n depth = 3\n\n images = tf.cast(images, tf.float32)\n images = tf.reshape(images, shape=[batch_size, height, width, depth])\n\n # Display the training images in the visualizer.\n tf.summary.image('images', images)\n\n return images, tf.reshape(label_index_batch, [batch_size])\n", "sub_path": "pbtricks/preprocessors/image_processing_inception_retina.py", "file_name": "image_processing_inception_retina.py", "file_ext": "py", "file_size_in_byte": 23336, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tensorflow.contrib", "line_number": 31, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS", "line_number": 32, "usage_type": "attribute"}, {"api_name": "absl.flags", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.Assert", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.rank", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops.with_dependencies", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops", "line_number": 63, "usage_type": "name"}, {"api_name": "tensorflow.stack", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.Assert", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.logical_and", "line_number": 68, "usage_type": "call"}, {"api_name": 
"tensorflow.greater_equal", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.to_float", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops.with_dependencies", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops", "line_number": 77, "usage_type": "name"}, {"api_name": "retina.retina_tf.warp_image", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.rank", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.Assert", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops.with_dependencies", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops", "line_number": 113, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.Assert", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.logical_and", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops.with_dependencies", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops", "line_number": 129, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.Assert", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.Assert", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops.with_dependencies", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops", "line_number": 149, "usage_type": "name"}, {"api_name": "tensorflow.cast", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 150, "usage_type": "attribute"}, {"api_name": "tensorflow.python.ops.control_flow_ops.with_dependencies", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.control_flow_ops", "line_number": 151, "usage_type": "name"}, {"api_name": "tensorflow.cast", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.random_uniform", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 154, "usage_type": "attribute"}, {"api_name": "tensorflow.random_uniform", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 156, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 180, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 180, "usage_type": "attribute"}, {"api_name": "tensorflow.split", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 210, "usage_type": "call"}, {"api_name": 
"tensorflow.convert_to_tensor", "line_number": 226, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 226, "usage_type": "attribute"}, {"api_name": "tensorflow.to_float", "line_number": 228, "usage_type": "call"}, {"api_name": "tensorflow.to_float", "line_number": 229, "usage_type": "call"}, {"api_name": "tensorflow.to_float", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.cond", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.greater", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.to_int32", "line_number": 235, "usage_type": "call"}, {"api_name": "tensorflow.to_int32", "line_number": 236, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 249, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 249, "usage_type": "attribute"}, {"api_name": "tensorflow.shape", "line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 255, "usage_type": "call"}, {"api_name": "tensorflow.image.resize_bilinear", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 256, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 287, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 321, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 338, "usage_type": "call"}, {"api_name": "tensorflow.image.decode_jpeg", "line_number": 343, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 343, "usage_type": "attribute"}, {"api_name": "tensorflow.image.convert_image_dtype", "line_number": 348, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 348, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 348, "usage_type": "attribute"}, {"api_name": "tensorflow.random_uniform", "line_number": 372, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 373, "usage_type": "attribute"}, {"api_name": "tensorflow.to_float", "line_number": 378, "usage_type": "call"}, {"api_name": "tensorflow.image.random_flip_left_right", "line_number": 379, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 379, "usage_type": "attribute"}, {"api_name": "tensorflow.to_float", "line_number": 397, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 427, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 428, "usage_type": "call"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 469, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 469, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 471, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 471, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 473, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 473, "usage_type": "attribute"}, {"api_name": "tensorflow.VarLenFeature", "line_number": 476, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 476, "usage_type": "attribute"}, {"api_name": "tensorflow.parse_single_example", "line_number": 484, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 485, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 485, "usage_type": "attribute"}, {"api_name": 
"tensorflow.expand_dims", "line_number": 487, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 488, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 489, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 490, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 493, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 497, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 498, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 522, "usage_type": "call"}, {"api_name": "tensorflow.train.string_input_producer", "line_number": 529, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 529, "usage_type": "attribute"}, {"api_name": "tensorflow.train.string_input_producer", "line_number": 533, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 533, "usage_type": "attribute"}, {"api_name": "tensorflow.RandomShuffleQueue", "line_number": 558, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 561, "usage_type": "attribute"}, {"api_name": "tensorflow.FIFOQueue", "line_number": 563, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 565, "usage_type": "attribute"}, {"api_name": "tensorflow.train.queue_runner.add_queue_runner", "line_number": 575, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 575, "usage_type": "attribute"}, {"api_name": "tensorflow.train.queue_runner.QueueRunner", "line_number": 576, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 576, "usage_type": "attribute"}, {"api_name": "tensorflow.train.batch_join", "line_number": 593, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 593, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 603, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 603, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 604, "usage_type": "call"}, {"api_name": "tensorflow.summary.image", "line_number": 607, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 607, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 609, "usage_type": "call"}]} +{"seq_id": "64771969", "text": "import woodwork as ww\n\nfrom evalml.data_checks import (\n DataCheck,\n DataCheckAction,\n DataCheckActionCode,\n DataCheckError,\n DataCheckMessageCode,\n DataCheckWarning,\n)\nfrom evalml.objectives import get_objective\nfrom evalml.problem_types import (\n ProblemTypes,\n handle_problem_types,\n is_binary,\n is_multiclass,\n is_regression,\n)\nfrom evalml.utils.woodwork_utils import (\n infer_feature_types,\n numeric_and_boolean_ww,\n)\n\n\nclass InvalidTargetDataCheck(DataCheck):\n \"\"\"Checks if the target data contains missing or invalid values.\n\n Arguments:\n problem_type (str or ProblemTypes): The specific problem type to data check for.\n e.g. 'binary', 'multiclass', 'regression, 'time series regression'\n objective (str or ObjectiveBase): Name or instance of the objective class.\n n_unique (int): Number of unique target values to store when problem type is binary and target\n incorrectly has more than 2 unique values. Non-negative integer. If None, stores all unique values. 
Defaults to 100.\n \"\"\"\n\n multiclass_continuous_threshold = 0.05\n\n def __init__(self, problem_type, objective, n_unique=100):\n self.problem_type = handle_problem_types(problem_type)\n self.objective = get_objective(objective)\n if n_unique is not None and n_unique <= 0:\n raise ValueError(\"`n_unique` must be a non-negative integer value.\")\n self.n_unique = n_unique\n\n def validate(self, X, y):\n \"\"\"Checks if the target data contains missing or invalid values.\n\n Arguments:\n X (pd.DataFrame, np.ndarray): Features. Ignored.\n y (pd.Series, np.ndarray): Target data to check for invalid values.\n\n Returns:\n dict (DataCheckError): List with DataCheckErrors if any invalid values are found in the target data.\n\n Example:\n >>> import pandas as pd\n >>> X = pd.DataFrame({\"col\": [1, 2, 3, 1]})\n >>> y = pd.Series([0, 1, None, None])\n >>> target_check = InvalidTargetDataCheck('binary', 'Log Loss Binary')\n >>> assert target_check.validate(X, y) == {\"errors\": [{\"message\": \"2 row(s) (50.0%) of target values are null\",\\\n \"data_check_name\": \"InvalidTargetDataCheck\",\\\n \"level\": \"error\",\\\n \"code\": \"TARGET_HAS_NULL\",\\\n \"details\": {\"num_null_rows\": 2, \"pct_null_rows\": 50}}],\\\n \"warnings\": [],\\\n \"actions\": [{'code': 'IMPUTE_COL', 'metadata': {'column': None, 'impute_strategy': 'most_frequent', 'is_target': True}}]}\n \"\"\"\n results = {\"warnings\": [], \"errors\": [], \"actions\": []}\n\n if y is None:\n results[\"errors\"].append(\n DataCheckError(\n message=\"Target is None\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_IS_NONE,\n details={},\n ).to_dict()\n )\n return results\n\n y = infer_feature_types(y)\n is_supported_type = y.ww.logical_type.type_string in numeric_and_boolean_ww + [\n ww.logical_types.Categorical.type_string\n ]\n if not is_supported_type:\n results[\"errors\"].append(\n DataCheckError(\n message=\"Target is unsupported {} type. 
Valid Woodwork logical types include: {}\".format(\n type(y.ww.logical_type),\n \", \".join([ltype for ltype in numeric_and_boolean_ww]),\n ),\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE,\n details={\"unsupported_type\": y.ww.logical_type.type_string},\n ).to_dict()\n )\n null_rows = y.isnull()\n if null_rows.all():\n results[\"errors\"].append(\n DataCheckError(\n message=\"Target is either empty or fully null.\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL,\n details={},\n ).to_dict()\n )\n return results\n elif null_rows.any():\n num_null_rows = null_rows.sum()\n pct_null_rows = null_rows.mean() * 100\n results[\"errors\"].append(\n DataCheckError(\n message=\"{} row(s) ({}%) of target values are null\".format(\n num_null_rows, pct_null_rows\n ),\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_HAS_NULL,\n details={\n \"num_null_rows\": num_null_rows,\n \"pct_null_rows\": pct_null_rows,\n },\n ).to_dict()\n )\n impute_strategy = (\n \"mean\" if is_regression(self.problem_type) else \"most_frequent\"\n )\n results[\"actions\"].append(\n DataCheckAction(\n DataCheckActionCode.IMPUTE_COL,\n metadata={\n \"column\": None,\n \"is_target\": True,\n \"impute_strategy\": impute_strategy,\n },\n ).to_dict()\n )\n\n value_counts = y.value_counts()\n unique_values = value_counts.index.tolist()\n\n if is_binary(self.problem_type) and len(value_counts) != 2:\n if self.n_unique is None:\n details = {\"target_values\": unique_values}\n else:\n details = {\n \"target_values\": unique_values[\n : min(self.n_unique, len(unique_values))\n ]\n }\n results[\"errors\"].append(\n DataCheckError(\n message=\"Binary class targets require exactly two unique values.\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,\n details=details,\n ).to_dict()\n )\n\n if (\n self.problem_type == ProblemTypes.REGRESSION\n and \"numeric\" not in y.ww.semantic_tags\n ):\n results[\"errors\"].append(\n DataCheckError(\n message=\"Target data type should be numeric for regression type problems.\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE,\n details={},\n ).to_dict()\n )\n\n if is_multiclass(self.problem_type):\n if value_counts.min() <= 1:\n least_populated = value_counts[value_counts <= 1]\n details = {\n \"least_populated_class_labels\": sorted(\n least_populated.index.tolist()\n )\n }\n results[\"errors\"].append(\n DataCheckError(\n message=\"Target does not have at least two instances per class which is required for multiclass classification\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS,\n details=details,\n ).to_dict()\n )\n if len(unique_values) <= 2:\n details = {\"num_classes\": len(unique_values)}\n results[\"errors\"].append(\n DataCheckError(\n message=\"Target has two or less classes, which is too few for multiclass problems. 
Consider changing to binary.\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_ENOUGH_CLASSES,\n details=details,\n ).to_dict()\n )\n\n num_class_to_num_value_ratio = len(unique_values) / len(y)\n if num_class_to_num_value_ratio >= self.multiclass_continuous_threshold:\n details = {\"class_to_value_ratio\": num_class_to_num_value_ratio}\n results[\"warnings\"].append(\n DataCheckWarning(\n message=\"Target has a large number of unique values, could be regression type problem.\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_HIGH_UNIQUE_CLASS,\n details=details,\n ).to_dict()\n )\n\n any_neg = (\n not (y > 0).all()\n if y.ww.logical_type.type_string\n in [\n ww.logical_types.Integer.type_string,\n ww.logical_types.Double.type_string,\n ]\n else None\n )\n if any_neg and self.objective.positive_only:\n details = {\n \"Count of offending values\": sum(val <= 0 for val in y.values.flatten())\n }\n results[\"errors\"].append(\n DataCheckError(\n message=f\"Target has non-positive values which is not supported for {self.objective.name}\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE,\n details=details,\n ).to_dict()\n )\n\n if X is not None:\n X = infer_feature_types(X)\n X_index = list(X.index)\n y_index = list(y.index)\n X_length = len(X_index)\n y_length = len(y_index)\n if X_length != y_length:\n results[\"warnings\"].append(\n DataCheckWarning(\n message=\"Input target and features have different lengths\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.MISMATCHED_LENGTHS,\n details={\n \"features_length\": X_length,\n \"target_length\": y_length,\n },\n ).to_dict()\n )\n\n if X_index != y_index:\n if set(X_index) == set(y_index):\n results[\"warnings\"].append(\n DataCheckWarning(\n message=\"Input target and features have mismatched indices order\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.MISMATCHED_INDICES_ORDER,\n details={},\n ).to_dict()\n )\n else:\n index_diff_not_in_X = list(set(y_index) - set(X_index))[:10]\n index_diff_not_in_y = list(set(X_index) - set(y_index))[:10]\n results[\"warnings\"].append(\n DataCheckWarning(\n message=\"Input target and features have mismatched indices\",\n data_check_name=self.name,\n message_code=DataCheckMessageCode.MISMATCHED_INDICES,\n details={\n \"indices_not_in_features\": index_diff_not_in_X,\n \"indices_not_in_target\": index_diff_not_in_y,\n },\n ).to_dict()\n )\n\n return results\n", "sub_path": "evalml/data_checks/invalid_targets_data_check.py", "file_name": "invalid_targets_data_check.py", "file_ext": "py", "file_size_in_byte": 12012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "evalml.data_checks.DataCheck", "line_number": 25, "usage_type": "name"}, {"api_name": "evalml.problem_types.handle_problem_types", "line_number": 39, "usage_type": "call"}, {"api_name": "evalml.objectives.get_objective", "line_number": 40, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 72, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_IS_NONE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 75, "usage_type": "name"}, {"api_name": "evalml.utils.woodwork_utils.infer_feature_types", "line_number": 81, "usage_type": "call"}, {"api_name": 
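# A short usage sketch of the check above (hypothetical data; assumes evalml's
# built-in "Log Loss Multiclass" objective): a multiclass target with only two
# classes should be flagged with TARGET_MULTICLASS_NOT_ENOUGH_CLASSES.
import pandas as pd
from evalml.data_checks import InvalidTargetDataCheck

X = pd.DataFrame({"col": range(4)})
y = pd.Series([0, 1, 0, 1])
check = InvalidTargetDataCheck("multiclass", "Log Loss Multiclass")
results = check.validate(X, y)
print([err["code"] for err in results["errors"]])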
"evalml.utils.woodwork_utils.numeric_and_boolean_ww", "line_number": 82, "usage_type": "name"}, {"api_name": "woodwork.logical_types", "line_number": 83, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 87, "usage_type": "call"}, {"api_name": "evalml.utils.woodwork_utils.numeric_and_boolean_ww", "line_number": 90, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE", "line_number": 93, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 93, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 100, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL", "line_number": 103, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 103, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 112, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_HAS_NULL", "line_number": 117, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 117, "usage_type": "name"}, {"api_name": "evalml.problem_types.is_regression", "line_number": 125, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckAction", "line_number": 128, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckActionCode.IMPUTE_COL", "line_number": 129, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckActionCode", "line_number": 129, "usage_type": "name"}, {"api_name": "evalml.problem_types.is_binary", "line_number": 141, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 151, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES", "line_number": 154, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 154, "usage_type": "name"}, {"api_name": "evalml.problem_types.ProblemTypes.REGRESSION", "line_number": 160, "usage_type": "attribute"}, {"api_name": "evalml.problem_types.ProblemTypes", "line_number": 160, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 164, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE", "line_number": 167, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 167, "usage_type": "name"}, {"api_name": "evalml.problem_types.is_multiclass", "line_number": 172, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 181, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS", "line_number": 184, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 184, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 191, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_MULTICLASS_NOT_ENOUGH_CLASSES", "line_number": 194, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 194, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckWarning", "line_number": 203, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_MULTICLASS_HIGH_UNIQUE_CLASS", 
"line_number": 206, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 206, "usage_type": "name"}, {"api_name": "woodwork.logical_types", "line_number": 215, "usage_type": "attribute"}, {"api_name": "woodwork.logical_types", "line_number": 216, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckError", "line_number": 225, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE", "line_number": 228, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 228, "usage_type": "name"}, {"api_name": "evalml.utils.woodwork_utils.infer_feature_types", "line_number": 234, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckWarning", "line_number": 241, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.MISMATCHED_LENGTHS", "line_number": 244, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 244, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckWarning", "line_number": 255, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.MISMATCHED_INDICES_ORDER", "line_number": 258, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 258, "usage_type": "name"}, {"api_name": "evalml.data_checks.DataCheckWarning", "line_number": 266, "usage_type": "call"}, {"api_name": "evalml.data_checks.DataCheckMessageCode.MISMATCHED_INDICES", "line_number": 269, "usage_type": "attribute"}, {"api_name": "evalml.data_checks.DataCheckMessageCode", "line_number": 269, "usage_type": "name"}]} +{"seq_id": "578566153", "text": "from django.contrib import admin\nfrom django.urls import path, include\n\nfrom main_app import views\nfrom contract.views import ContractCreateView, ContractListView, create_contract_pdf, contract_detail\n\nurlpatterns = [\n path('', views.HomeView.as_view(), name='home'),\n path('login/', views.LoginView.as_view(), name='login'),\n path('logout/', views.logout_view, name='logout'),\n path('realAdmin/', admin.site.urls),\n path('pdf//', create_contract_pdf, name='contract-create-pdf'),\n]\n\nurlpatterns += [\n path('contract/create//', ContractCreateView.as_view(), name='contract-create'),\n path('contracts/', ContractListView.as_view(), name='contract-list'),\n path('contract//', contract_detail, name='contract-detail')\n]\n\nurlpatterns += [\n path('admin/', include('lawyer.urls'))\n]\n", "sub_path": "broker/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "main_app.views.HomeView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "main_app.views.HomeView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "main_app.views.LoginView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "main_app.views.LoginView", "line_number": 9, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "main_app.views.logout_view", "line_number": 10, "usage_type": 
"attribute"}, {"api_name": "main_app.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "contract.views.create_contract_pdf", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "contract.views.ContractCreateView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "contract.views.ContractCreateView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "contract.views.ContractListView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "contract.views.ContractListView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "contract.views.contract_detail", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "420371245", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch.nn.utils.rnn import pack_padded_sequence\nimport typing\nimport torch.cuda\nimport math\nimport config\n\n\nclass Net(nn.Module):\n \"\"\" Re-implementation of ``Show, Ask, Attend, and Answer: A Strong Baseline For Visual Question Answering'' [0]\n\n [0]: https://arxiv.org/abs/1704.03162\n \"\"\"\n\n def __init__(self, embedding_tokens):\n super(Net, self).__init__()\n question_features = 1024\n vision_features = config.output_features\n glimpses = 2\n\n self.lstm_text = LSTMTextProcessor(\n embedding_tokens=embedding_tokens,\n embedding_features=300,\n lstm_features=question_features,\n drop=0.5,\n )\n\n #self.text = TextProcessor(\n self.cnn_text = CNNTextProcessor(\n embedding_tokens=embedding_tokens,\n embedding_features=300,\n #lstm_features=question_features,\n kernel_depth=question_features,\n drop=0.5,\n )\n self.attention = Attention(\n v_features=vision_features,\n q_features=question_features,\n mid_features=512,\n glimpses=2,\n drop=0.5,\n )\n self.classifier = Classifier(\n in_features=glimpses * vision_features + question_features,\n mid_features=1024,\n out_features=config.max_answers,\n drop=0.5,\n )\n\n for m in self.modules():\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n init.xavier_uniform(m.weight)\n if m.bias is not None:\n m.bias.data.zero_()\n\n self.gate = DualGatedLinearUnit()\n\n def forward(self, v, q, q_len):\n q_lstm = self.lstm_text(q, list(q_len.data))\n q_cnn = self.cnn_text(q, list(q_len.data))\n q = self.gate(q_lstm, q_cnn)\n v = v / (v.norm(p=2, dim=1, keepdim=True).expand_as(v) + 1e-8)\n a = self.attention(v, q)\n v = apply_attention(v, a)\n\n combined = torch.cat([v, q], dim=1)\n answer = self.classifier(combined)\n return answer\n\n\nclass DualGatedLinearUnit(nn.Module):\n def __init__(self):\n super(DualGatedLinearUnit, self).__init__()\n pass\n\n def forward(self, x, y):\n x_s = torch.nn.functional.sigmoid(x)\n y_s = torch.nn.functional.sigmoid(y)\n y = x_s * y + x * y_s\n return y\n\nclass Classifier(nn.Sequential):\n def __init__(self, in_features, mid_features, 
out_features, drop=0.0):\n        super(Classifier, self).__init__()\n        self.add_module('drop1', nn.Dropout(drop))\n        self.add_module('lin1', nn.Linear(in_features, mid_features))\n        self.add_module('relu', nn.ReLU())\n        self.add_module('drop2', nn.Dropout(drop))\n        self.add_module('lin2', nn.Linear(mid_features, out_features))\n\n\nclass LSTMTextProcessor(nn.Module):\n    def __init__(self, embedding_tokens, embedding_features, lstm_features, drop=0.0):\n        super(LSTMTextProcessor, self).__init__()\n        self.embedding = nn.Embedding(embedding_tokens, embedding_features, padding_idx=0)\n        self.drop = nn.Dropout(drop)\n        self.tanh = nn.Tanh()\n        self.lstm = nn.LSTM(input_size=embedding_features,\n                            hidden_size=lstm_features,\n                            num_layers=1)\n        self.features = lstm_features\n\n        self._init_lstm(self.lstm.weight_ih_l0)\n        self._init_lstm(self.lstm.weight_hh_l0)\n        self.lstm.bias_ih_l0.data.zero_()\n        self.lstm.bias_hh_l0.data.zero_()\n\n        init.xavier_uniform(self.embedding.weight)\n\n    def _init_lstm(self, weight):\n        for w in weight.chunk(4, 0):\n            init.xavier_uniform(w)\n\n    def forward(self, q, q_len):\n        # print(q)\n        embedded = self.embedding(q)\n        # print(embedded.size())\n        tanhed = self.tanh(self.drop(embedded))\n        packed = pack_padded_sequence(tanhed, q_len, batch_first=True)\n        _, (_, c) = self.lstm(packed)\n        return c.squeeze(0)\n\nclass ConvBlock(nn.Module):\n    def __init__(self, kernel_depth, embedding_features, kernel_width, max_k=1):\n        super(ConvBlock, self).__init__()\n        padding_size = math.ceil((kernel_width-1)/2)\n\n        self.cnn_conv = nn.Conv2d(1, kernel_depth, (embedding_features + 2*padding_size, kernel_width), stride=1,\n                                  padding=padding_size)\n        self.activation = nn.ReLU()\n        self.batchnorm = nn.BatchNorm1d(kernel_depth)\n\n    def forward(self, x):\n        x = self.cnn_conv(x)\n        x = self.activation(self.batchnorm(x))\n        x = x.transpose(1,2)\n        return x\n\n# kernel_depth = 1024\nclass CNNTextProcessor(nn.Module):\n    def __init__(self, embedding_tokens, embedding_features, kernel_depth, drop=0.0, kernel_width=3, layers=2, max_k=1, multilayer=True):\n        super(CNNTextProcessor, self).__init__()\n        self.kw = kernel_width\n        self.multilayer = multilayer\n\n        self.embedding = nn.Embedding(embedding_tokens, embedding_features, padding_idx=0)\n        self.drop = nn.Dropout(drop)\n        self.tanh = nn.Tanh()\n\n        # single CNN case\n        self.cnn_single_layer = ConvBlock(kernel_depth, embedding_features, kernel_width)\n\n        # multi-layer case\n        self.cnn_multi_l1 = ConvBlock(kernel_depth, embedding_features, kernel_width)\n        self.cnn_multi_l2 = ConvBlock(kernel_depth, kernel_depth, kernel_width)\n        self.cnn_multi_l3 = ConvBlock(kernel_depth, kernel_depth, kernel_width)\n\n        self.pooling = nn.AdaptiveMaxPool2d((max_k, kernel_depth))\n\n        init.xavier_uniform(self.embedding.weight)\n\n    def forward(self, q : torch.cuda.LongTensor, q_len : int):\n\n        embedded = self.embedding(q) # size: batch (128) * seq_len (23) * emb_len (300)\n        tanhed = self.tanh(self.drop(embedded))\n\n        c = torch.unsqueeze(tanhed.transpose(1,2), 1) # size: batch (128) * seq_len (23) * emb_len (300)\n\n        # single layer\n        if not self.multilayer:\n            c = self.cnn_single_layer(c)\n        # multi layer\n        else:\n            c = self.cnn_multi_l1(c)\n            c = self.cnn_multi_l2(c)\n            c = self.cnn_multi_l3(c)\n\n        c = self.pooling(torch.squeeze(c))\n        c = torch.squeeze(c) # flatten it\n        return c.squeeze(0)\n\n\nclass Attention(nn.Module):\n    def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0):\n        super(Attention, self).__init__()\n        self.v_conv = nn.Conv2d(v_features, mid_features, 1, bias=False)  # let self.q_lin 
take care of bias\n self.q_lin = nn.Linear(q_features, mid_features)\n self.x_conv = nn.Conv2d(mid_features, glimpses, 1)\n\n self.drop = nn.Dropout(drop)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, v, q):\n v = self.v_conv(self.drop(v))\n q = self.q_lin(self.drop(q))\n q = tile_2d_over_nd(q, v)\n x = self.relu(v + q)\n x = self.x_conv(self.drop(x))\n return x\n\n\ndef apply_attention(input, attention):\n \"\"\" Apply any number of attention maps over the input.\n The attention map has to have the same size in all dimensions except dim=1.\n \"\"\"\n n, c = input.size()[:2]\n glimpses = attention.size(1)\n\n # flatten the spatial dims into the third dim, since we don't need to care about how they are arranged\n input = input.view(n, c, -1)\n attention = attention.view(n, glimpses, -1)\n s = input.size(2)\n\n # apply a softmax to each attention map separately\n # since softmax only takes 2d inputs, we have to collapse the first two dimensions together\n # so that each glimpse is normalized separately\n attention = attention.view(n * glimpses, -1)\n attention = F.softmax(attention)\n\n # apply the weighting by creating a new dim to tile both tensors over\n target_size = [n, glimpses, c, s]\n input = input.view(n, 1, c, s).expand(*target_size)\n attention = attention.view(n, glimpses, 1, s).expand(*target_size)\n weighted = input * attention\n # sum over only the spatial dimension\n weighted_mean = weighted.sum(dim=3)\n # the shape at this point is (n, glimpses, c, 1)\n return weighted_mean.view(n, -1)\n\n\ndef tile_2d_over_nd(feature_vector, feature_map):\n \"\"\" Repeat the same feature vector over all spatial positions of a given feature map.\n The feature vector should have the same batch size and number of features as the feature map.\n \"\"\"\n n, c = feature_vector.size()\n spatial_size = feature_map.dim() - 2\n tiled = feature_vector.view(n, c, *([1] * spatial_size)).expand_as(feature_map)\n return tiled\n", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 8666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "config.output_features", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.max_answers", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_uniform", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.functional.sigmoid", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.sigmoid", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": 
"torch.nn.Dropout", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pack_padded_sequence", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 126, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveMaxPool2d", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.cuda", "line_number": 165, "usage_type": "attribute"}, {"api_name": "torch.unsqueeze", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.squeeze", 
"line_number": 181, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 186, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 186, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 190, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 191, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 193, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 221, "usage_type": "name"}]} +{"seq_id": "558830154", "text": "\n\ndef listInstruments(api, cfg):\n instResp = api.account.instruments(cfg.active_account)\n instruments = instResp.get('instruments','200')\n stuff = [{\"name\": i.name, \"pipLocation\": i.pipLocation,\n \"displayPrecision\": i.displayPrecision,\n \"marginRate\": i.marginRate,\n \"minimumTrailingStopDistance\": i.minimumTrailingStopDistance} for i in instruments]\n return stuff\n\n\ndef collectInstruments(api, cfg, dir):\n import json\n stuff = listInstruments(api, cfg)\n with open((dir + \"/instruments.json\"), 'w') as jsonf:\n json.dump(stuff, jsonf)\n\n\ndef collectForMonth(api, pair, year, month, dir, granularity=\"S5\", refresh=True):\n kwargs = {\"granularity\":granularity, \"price\":\"MBA\"}\n\n batches = ( (\"00:00:00\", \"03:59:59\"), (\"04:00:00\", \"07:59:59\"),\n (\"08:00:00\", \"11:59:59\"), (\"12:00:00\", \"15:59:59\"),\n (\"16:00:00\", \"19:59:59\"), (\"20:00:00\", \"23:59:59\"))\n\n precache = []\n\n daysOfMonth = (00,31,28,31,30,31,30,31,31,30,31,30,31)\n days = daysOfMonth[month]\n month = str(month)\n if(len(month)==1): month = \"0\" + month\n if(month==\"02\"):\n if(year % 4 == 0 and (not year % 100 ==0 or year % 400 ==0)):\n days+=1\n\n if(not refresh):\n from candlecache import SliceRowIterator\n _since = \"{}-{}-{}T00:00:00.000000000Z\".format(year, month, \"01\")\n _till = \"{}-{}-{}T23:59:59:999999999Z\".format(year, month, days)\n precache = list( SliceRowIterator(dir,pair,granularity, _since, _till, api) )\n\n allCandles = []\n timeToBreak = False\n\n if(len(precache)>0 and not refresh):\n for c in precache:\n row = [c.time, c.bid.o, c.ask.o, c.mid.o, c.bid.l, c.ask.l, c.mid.l, c.bid.h, c.ask.h, c.mid.h, c.bid.c, c.ask.c, c.mid.c, c.volume ]\n allCandles.append(row)\n\n header = [\"time\", \"bid-o\", \"ask-o\", \"mid-o\", \"bid-l\", \"ask-l\", \"mid-l\", \"bid-h\", \"ask-h\", \"mid-h\", \"bid-c\", \"ask-c\", \"mid-c\", \"volume\"]\n for day in range(days):\n d = str(day+1)\n if(len(d)==1): d = \"0\" + d\n\n dstamp = \"{}-{}-{}\".format(year,month,d)\n for b in batches:\n fromTs = \"{}T{}.000000000Z\".format(dstamp, b[0])\n toTs = \"{}T{}.000000000Z\".format(dstamp, b[1])\n kwargs[\"fromTime\"]=fromTs\n kwargs[\"toTime\"]=toTs\n if(not refresh):\n if(len(allCandles)>0 and toTs < allCandles[-1][0]):\n # no need to refresh what is way in the past...\n continue\n\n print(kwargs)\n resp = 
            resp = api.instrument.candles(pair, **kwargs)\n            # import pdb; pdb.set_trace()\n            if(str(resp.status) != '200'):\n\n                if(\"errorMessage\" in resp.body and resp.body[\"errorMessage\"] == \"Invalid value specified for 'to'. Time is in the future\"):\n                    del kwargs['toTime']\n                    print(('future is not accepted', kwargs))\n                    resp = api.instrument.candles(pair, **kwargs)\n                    timeToBreak = True\n                else:\n                    print(resp.body)\n\n            candles = resp.get('candles',200)\n            candles.sort(key=lambda a: a.time)\n            for c in candles:\n                if(len(allCandles)>0 and c.time < allCandles[-1][0]): continue\n                row = [c.time, c.bid.o, c.ask.o, c.mid.o, c.bid.l, c.ask.l, c.mid.l, c.bid.h, c.ask.h, c.mid.h, c.bid.c, c.ask.c, c.mid.c, c.volume ]\n                allCandles.append(row)\n            if(timeToBreak): break\n        if(timeToBreak): break\n    return allCandles\n\n\ndef fileLocation(pair, year, month, dir, granularity):\n    month = str(month)\n    if(len(month)==1): month = \"0\" + month\n    return \"{}/{}-{}.{}.{}.csv\".format(dir, year, month, pair, granularity)\n\ndef writeCollectedCandles(pair, year, month, dir, granularity, allCandles):\n    import csv\n    with open( fileLocation(pair, year, month, dir, granularity), \"w\") as outf:\n        csvwriter = csv.writer(outf)\n        for r in allCandles:\n            csvwriter.writerow(r)\n\n\nif __name__ == '__main__': \n    import argparse, re, pdb, time\n    import oandaconfig\n    import v20, csv\n\n    from myt_support import TradeLoop, trailSpecsFromStringParam, getSortedCandles, getBacktrackingCandles, PositionFactory\n\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--select', nargs='?',\n                        help=\"valid currency-pair\")\n    parser.add_argument('--dir', nargs='?', help='target directory')\n    parser.add_argument('--slice', nargs='?', default=\"M1\")\n    parser.add_argument('--quick', action='store_true', help='to quickly extend a cache for what is missing in the month')\n    parser.add_argument('--year', nargs='?', type=int, default = None)\n    parser.add_argument('--month', nargs='?', type=int, default = None)\n    parser.add_argument('--instruments', action='store_true')\n\n    args = parser.parse_args()\n\n\n    cfg = oandaconfig.Config()\n    cfg.load(\"~/.v20.conf\")\n    api = v20.Context( cfg.hostname, cfg.port, token = cfg.token)\n\n    import datetime\n    today = datetime.date.today()\n    if(args.year is None): args.year = today.year\n    if(args.month is None): args.month = today.month\n\n\n    if(not args.instruments):\n        candles = collectForMonth(api, args.select, args.year, args.month, args.dir, args.slice, not args.quick)\n        writeCollectedCandles(args.select, args.year, args.month, args.dir, args.slice, candles)\n    else:\n        collectInstruments(api, cfg, args.dir)\n", "sub_path": "py3/extractor.py", "file_name": "extractor.py", "file_ext": "py", "file_size_in_byte": 5540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.dump", "line_number": 17, "usage_type": "call"}, {"api_name": "candlecache.SliceRowIterator", "line_number": 41, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 99, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 112, "usage_type": "call"}, {"api_name": "oandaconfig.Config", "line_number": 126, "usage_type": "call"}, {"api_name": "v20.Context", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 131, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 131, "usage_type": "attribute"}]} +{"seq_id": "23902924", "text": "\n# Python code: generates a calendar based on the previous year\n# Developed by David Martinez\n\nfrom tabulate import tabulate# pip install tabulate\nimport os\n\ndef mostrar_entrada():\n\n\tos.system('clear')# clear the screen on every run\n\twhile True:\n\n\t\t# menu with the weekday indices\n\t\tprint('-'*2,'MENU','-'*2,'\\n')\n\t\tDIAS = ['Domingo','Lunes','Martes','Miercoles','Jueves','Viernes','Sabado']\n\t\tfor a,b in enumerate(DIAS,1):print(f'{a}:{b}')\n\t\t\n\t\t# ask the user for a number; on any error, go back to the loop\n\t\ttry:\n\t\t\tentrada = input('Ingresa el año nuevo e indice del dia final del año pasado (2021,5)>>')\n\t\t\taño,dia = [int(x) for x in entrada.split(',')]\n\t\t\t\n\t\t\tif dia > 0 and dia <= 7 and año > 0:\n\t\t\t\treturn dia + 1,año\n\t\t\telse:\n\t\t\t\tprint(f'La entrada {año},{dia} no es valida, intentalo de nuevo')\n\t\texcept:\n\t\t\tprint(f'La entrada no es valida, intentalo de nuevo')\n\n\ndef mes_n(mes_n,dia_n):\n\n\t\n\tsemana_1 = [None for y in range(int(dia_n) - 1)]# None entries up to the starting index\n\tc = dia_n - 1\n\tfor i in range(dia_n,8):\n\t\tc+=1\n\t\tsemana_1.insert(c,(i + 1) - dia_n)# i+1 because semana_1 starts at 1\n\n\t# subtract from 9 to skip the empty slots in semana_1\t\n\tmes = list(range(9 - dia_n,mes_n + 1))\n\tsemanas = [semana_1]\n\tporciones = 0\n\n\tfor i in range(1,6):\n\t\tporciones+=7\n\t\tsemanas.insert(i,mes[porciones - 7:porciones])# append the weeks to the list\n\n\t# if there is an empty week at the end, remove it\n\tif semanas[-1] == []:semanas.pop();return semanas;\n\telse:\n\t\treturn semanas\n
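# Worked example: for a 31-day month whose day 1 falls in column 3 (Martes),\n# mes_n(31, 3) returns [[None, None, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11, 12],\n# [13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26], [27, 28, 29, 30, 31]]\n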
\ndef mostrar_calendario():\n\n\n\tdia,año = mostrar_entrada()# last weekday of the previous year, new year \n\tDIAS = ['Domingo','Lunes','Martes','Miercoles','Jueves','Viernes','Sabado']\n\tENERO=MARZO=MAYO=JULIO=AGOSTO=OCTUBRE=DICIEMBRE = 31\n\tABRIL=JUNIO=SEPTIEMBRE=NOVIEMBRE = 30\n\n\t\n\tif año % 4 == 0 and (año % 100 != 0 or año % 400 == 0):FEBRERO = 29;# leap years (centuries only when divisible by 400)\n\telse:FEBRERO=28\n\n\tMESES = {'ENERO':ENERO,'FEBRERO':FEBRERO,'MARZO':MARZO,\n\t         'ABRIL':ABRIL,'MAYO':MAYO,'JUNIO':JUNIO,\n\t         'JULIO':JULIO,'AGOSTO':AGOSTO,'SEPTIEMBRE':SEPTIEMBRE,\n\t         'OCTUBRE':OCTUBRE,'NOVIEMBRE':NOVIEMBRE,'DICIEMBRE':DICIEMBRE\n\t         }\n\n\tprimeros_dias = [dia]# store the first day of each month\t\n\n\tfor a,b,c in zip(MESES.keys(),MESES.values(),primeros_dias):\n\t\tsemanas_n = mes_n(b,c) \n\t\tdia_p = len(semanas_n[-1]) + 1# len(semanas_n[-1]) + 1 gives the first weekday of the next month\n\t\tprimeros_dias.append(dia_p)\n\t\tprint('\\n','-'*30,'{:^5} DE {:^5}'.format(a,año),'-'*32)# {:^5} sets the column width\n\t\tprint(tabulate(semanas_n,headers = DIAS,tablefmt = 'fancy_grid',numalign = 'center'))\t\n\nmostrar_calendario()\n\n\n\n\n\n\n\n", "sub_path": "Scripts/Office/calendar.py", "file_name": "calendar.py", "file_ext": "py", "file_size_in_byte": 2619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.system", "line_number": 10, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "43943967", "text": "\r\nfrom django.conf.urls import url\r\nfrom . import views\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\napp_name = 'home'\r\nurlpatterns = [\r\n    url(r'^$', views.index, name=\"index\"),\r\n    url(r'^student_login/$', views.student_login, name=\"student_login\"),\r\n    url(r'^logout/$', views.logout, name=\"logout\"),\r\n    url(r'^dashboard/$', views.dashboard_faculty, name=\"dashboard\"),\r\n    url(r'^add_student/$', views.add_student, name=\"add_student\"),\r\n]\r\n\r\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\r\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n", "sub_path": "home/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.urls.static.static", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "652691302", "text": "import numpy as np\n\nfrom PIL import Image\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nimport tkinter\nimport cv2\nimport PIL\nfrom PIL import Image\nfrom PIL import ImageTk\nimport os\n\nimport program_param as pp\nimport data_transformation as dt\n\n\n\n\nclass WindowApp():\n    def __init__(self):\n        self.window = tkinter.Tk()\n        self.width = pp.video_stream.get(cv2.CAP_PROP_FRAME_WIDTH)\n        self.height = pp.video_stream.get(cv2.CAP_PROP_FRAME_HEIGHT)\n        self.canvas = tkinter.Canvas(self.window, width=1920, height=1080)\n        self.var = tkinter.StringVar()\n\n        self.var.trace(\"w\", self.__callback)\n        self.__act_video_source()\n        button = tkinter.Button(self.window, text=\"Rozpocznij/Zatrzymaj\", command=self.__buttoncallback)\n        button.place(x=1590, y=100)\n\n        self.canvas.pack()\n\n    def draw_result(self,frame,frame_raw,result,score,probabilities):\n        # draw a rectangle [SIZE_PICTxSIZE_PICT] in the center of the frame\n        a = (int((640 / 2) - (pp.size_pict + 20) / 2), int((480 / 2) - (pp.size_pict + 20) / 2))\n        b = (int((640 / 2) + (pp.size_pict + 20) / 2), int((480 / 2) + (pp.size_pict + 20) / 2))\n        cv2.rectangle(frame, a, b, (255, 0, 0), 2)\n\n        cv2.putText(frame, '%s' % (result), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)\n        cv2.putText(frame, '(score = %0.5f)' % (float(score)), (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),\n                    2)\n\n        all_frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\n        self.canvas.create_image(0, 0, image=all_frame, anchor=tkinter.NW)\n\n
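        # NOTE: Tk holds only a weak reference to PhotoImage objects; if all_frame/ear_frame\n        # are garbage-collected before the next update(), the canvas can render blank images.\n        # Keeping a reference on self (e.g. self._all_frame = all_frame) is the usual remedy.\n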
        ear_frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame_raw))\n        self.canvas.create_image(660, (640 - pp.input_size[0]) / 2, image=ear_frame, anchor=tkinter.W)\n\n        self.canvas.create_text(760 + pp.input_size[0], 60, font=(\"Purisa\", 22), text=\"Rozkład prawdopodobieństw: \",\n                                anchor=tkinter.W)\n        for i in range(0, len(dt.ear_class)):\n            procet = int(float(probabilities[i]) * 100.0)\n            text = \" \" + str(dt.ear_class[i]) + \": \" + str(probabilities[i]) + \" -> \" + str(procet) + \"% \"\n            label = tkinter.Label(text=text, anchor=tkinter.W, font=(\"Purisa\", 18))\n            self.canvas.create_window(980 + pp.input_size[0], 110 + i * 30, window=label)\n\n        if max(probabilities) > 0.50:\n            pred_person = str(dt.ear_class[np.argmax(probabilities)])\n            text = \" Rozpoznana osoba -> \" + pred_person\n            label = tkinter.Label(text=text, anchor=tkinter.CENTER, font=(\"Purisa\", 22))\n            win = self.canvas.create_window(1000, 350 + len(dt.ear_class) * 30, window=label)\n\n            pilImage = Image.open(pp.data_dir + '/Train/' + pred_person + '/1_1.jpg')\n            image = ImageTk.PhotoImage(pilImage)\n            self.canvas.create_image(1050, 500 + len(dt.ear_class) * 30, image=image)\n        else:\n            text = \" Rozpoznana osoba -> \"\n            label = tkinter.Label(text=text, anchor=tkinter.CENTER, font=(\"Purisa\", 22))\n            win = self.canvas.create_window(1000, 350 + len(dt.ear_class) * 30, window=label)\n            pred_person = \"0\"\n\n        self.window.update()\n\n    def draw_window(self,frame,result,score,probabilities):\n        # draw a rectangle [SIZE_PICTxSIZE_PICT] in the center of the frame\n        a = (int((640 / 2) - (pp.size_pict + 20) / 2), int((480 / 2) - (pp.size_pict + 20) / 2))\n        b = (int((640 / 2) + (pp.size_pict + 20) / 2), int((480 / 2) + (pp.size_pict + 20) / 2))\n        cv2.rectangle(frame, a, b, (255, 0, 0), 2)\n\n        cv2.putText(frame, '%s' % (result), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)\n        cv2.putText(frame, '(score = %0.5f)' % (float(score)), (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),\n                    2)\n\n        image_raw, image = dt.preprocess(frame)\n\n        all_frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\n        self.canvas.create_image(0, 0, image=all_frame, anchor=tkinter.NW)\n\n        ear_frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(image_raw))\n        self.canvas.create_image(660, (640 - pp.input_size[0]) / 2, image=ear_frame, anchor=tkinter.W)\n\n        pred_person = str(dt.ear_class[np.argmax(probabilities)])\n\n        if pred_person != \"0\":\n            try:\n                pilImage = Image.open(pp.data_dir + '/Train/' + pred_person + '/1_1.jpg')\n                image = ImageTk.PhotoImage(pilImage)\n                self.canvas.create_image(1050, 500 + len(dt.ear_class) * 30, image=image)\n            except:\n                pass\n\n        self.window.update()\n\n    def __callback(self,*args):\n        pass\n        # cv2.VideoCapture(pp.camera_source).release()\n        # pp.camera_source = '/dev/' + str(self.var.get())\n        # try:\n        #     vid = cv2.VideoCapture(pp.camera_source)\n        #     if not vid.isOpened():\n        #         raise ValueError(\"Unable to open video source\", pp.camera_source)\n        # except:\n        #     print(\"Unable to open video source \" + str(pp.camera_source))\n\n    def __buttoncallback(self,*args):\n        pp.flag_start = not (pp.flag_start)\n        print(\"Program on -> \" + str(pp.flag_start))\n\n    def __act_video_source(self):\n        available_stream = []\n        all_stream = os.listdir('/dev/')\n        for item in all_stream:\n            if item.find('video') != -1:\n                available_stream.append(item)\n        available_stream = sorted(available_stream)\n        print('all streams ' + str(available_stream))\n\n        label = tkinter.Label(text=\"Źródło obrazu -> \")\n\n        self.var.set(available_stream[0])\n        stream_box = 
tkinter.OptionMenu(self.window, self.var, *available_stream)\n label.place(x=1587, y=65)\n stream_box.place(x=1700, y=60)\n\n\n", "sub_path": "tkinter_window.py", "file_name": "tkinter_window.py", "file_ext": "py", "file_size_in_byte": 5851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 5, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 5, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 22, "usage_type": "call"}, {"api_name": "program_param.video_stream.get", "line_number": 23, "usage_type": "call"}, {"api_name": "program_param.video_stream", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 23, "usage_type": "attribute"}, {"api_name": "program_param.video_stream.get", "line_number": 24, "usage_type": "call"}, {"api_name": "program_param.video_stream", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tkinter.Canvas", "line_number": 25, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 30, "usage_type": "call"}, {"api_name": "program_param.size_pict", "line_number": 37, "usage_type": "attribute"}, {"api_name": "program_param.size_pict", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 45, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 46, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 48, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "attribute"}, {"api_name": "program_param.input_size", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 49, "usage_type": "attribute"}, {"api_name": "program_param.input_size", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 52, "usage_type": "attribute"}, {"api_name": "data_transformation.ear_class", "line_number": 53, "usage_type": "attribute"}, {"api_name": "data_transformation.ear_class", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 56, "usage_type": "attribute"}, {"api_name": "program_param.input_size", "line_number": 57, "usage_type": "attribute"}, {"api_name": "data_transformation.ear_class", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 60, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 62, 
"usage_type": "call"}, {"api_name": "tkinter.CENTER", "line_number": 62, "usage_type": "attribute"}, {"api_name": "data_transformation.ear_class", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 65, "usage_type": "name"}, {"api_name": "program_param.data_dir", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 66, "usage_type": "name"}, {"api_name": "data_transformation.ear_class", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 70, "usage_type": "call"}, {"api_name": "tkinter.CENTER", "line_number": 70, "usage_type": "attribute"}, {"api_name": "data_transformation.ear_class", "line_number": 71, "usage_type": "attribute"}, {"api_name": "program_param.size_pict", "line_number": 78, "usage_type": "attribute"}, {"api_name": "program_param.size_pict", "line_number": 79, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 83, "usage_type": "attribute"}, {"api_name": "data_transformation.preprocess", "line_number": 86, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 88, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 89, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 91, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 91, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 91, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 91, "usage_type": "attribute"}, {"api_name": "program_param.input_size", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 92, "usage_type": "attribute"}, {"api_name": "data_transformation.ear_class", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 94, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 98, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 98, "usage_type": "name"}, {"api_name": "program_param.data_dir", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 99, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 99, "usage_type": "name"}, {"api_name": "data_transformation.ear_class", "line_number": 100, "usage_type": "attribute"}, {"api_name": "program_param.flag_start", "line_number": 118, "usage_type": "attribute"}, {"api_name": "program_param.flag_start", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 123, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 130, "usage_type": "call"}, {"api_name": "tkinter.OptionMenu", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "180357914", "text": "import torch as t\nfrom Blocks.Layers import CnnEncoder\n\nclass 
EncoderLaw(t.nn.Module):\n def __init__(self, embedding_dim, num_filter, output_dim):\n super(EncoderLaw, self).__init__()\n self.output_dim = output_dim\n self.ce = CnnEncoder(embedding_dim, num_filter, output_dim=output_dim)\n\n def forward(self, inputs, mask):\n \"\"\"\n \n :param inputs: [batch, seq_len, law_len, emb]\n :param mask: [batch, seq_len, law_len]\n :return: \n \"\"\"\n raw_shape = inputs.size()\n inputs = inputs.view((-1, raw_shape[2], raw_shape[3]))\n mask = mask.view((-1, raw_shape[2]))\n net = self.ce(inputs, mask)\n net = net.view((raw_shape[0], raw_shape[1], self.output_dim))\n return net\n\n\n#\n#\n# ce = CnnEncoder(300,100)\n# a = t.randn((64,30,100,300))\n\n\n#\n#\n#\n# law = t.randn((64,202,20,300))\n# law_mask = t.randn((64,202,20))\n", "sub_path": "Blocks/EncoderLaw.py", "file_name": "EncoderLaw.py", "file_ext": "py", "file_size_in_byte": 905, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.nn", "line_number": 4, "usage_type": "attribute"}, {"api_name": "Blocks.Layers.CnnEncoder", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "53119737", "text": "from __res import find, count, key_by_index\nfrom __res import resfs_resolve, resfs_read, resfs_src, resfs_files # noqa\n\nimport six\n\n\ndef iterkeys(prefix='', strip_prefix=False):\n decode = lambda s: s\n if isinstance(prefix, six.text_type):\n prefix = prefix.encode('utf-8')\n decode = lambda s: s.decode('utf-8')\n\n for i in six.moves.range(count()):\n key = key_by_index(i)\n if key.startswith(prefix):\n if strip_prefix:\n key = key[len(prefix):]\n yield decode(key)\n\n\ndef iteritems(prefix=b'', strip_prefix=False):\n for key in iterkeys(prefix=prefix):\n value = find(key)\n if strip_prefix:\n key = key[len(prefix):]\n yield key, value\n", "sub_path": "library/python/resource/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 736, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "six.text_type", "line_number": 9, "usage_type": "attribute"}, {"api_name": "six.moves.range", "line_number": 13, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 13, "usage_type": "attribute"}, {"api_name": "__res.count", "line_number": 13, "usage_type": "call"}, {"api_name": "__res.key_by_index", "line_number": 14, "usage_type": "call"}, {"api_name": "__res.find", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "35406580", "text": "\"\"\"\nCascaded Convolution Model\n\n- Pranav Shrestha (ps2958)\n- Jeffrey Wan (jw3468)\n\n\"\"\"\nimport os\nimport pickle\nimport numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom keras.preprocessing import text, sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils import to_categorical\nfrom keras.models import Model, Input\nfrom keras.layers import Embedding, Dense, TimeDistributed, Concatenate, BatchNormalization, Lambda\nfrom keras.layers import Bidirectional, Activation, Dropout, CuDNNGRU, Conv1D\nfrom keras import backend as K\n\nfrom sklearn.model_selection import train_test_split, KFold\nfrom keras.metrics import categorical_accuracy\nfrom keras import backend as K\nfrom keras.regularizers import l1, l2\nfrom keras.optimizers import Adam\nimport tensorflow as tf\nfrom matplotlib import pyplot\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n### Data Retrieval\n# cb6133 = np.load(\"../data/cb6133.npy\")\ncb6133filtered = 
np.load(\"../data/cb6133filtered.npy\")\ncb513 = np.load(\"../data/cb513.npy\")\n\nprint()\n# print(cb6133.shape)\nprint(cb6133filtered.shape)\nprint(cb513.shape)\n\nmaxlen_seq = r = 700 # protein residues padded to 700\nf = 57 # number of features for each residue\n\nresidue_list = list('ACEDGFIHKMLNQPSRTWVYX') + ['NoSeq']\nq8_list = list('LBEGIHST') + ['NoSeq']\n\ncolumns = [\"id\", \"len\", \"input\", \"profiles\", \"expected\"]\n\ndef get_data(arr, bounds=None):\n    \n    if bounds is None: bounds = range(len(arr))\n    \n    data = [None for i in bounds]\n    for i in bounds:\n        seq, q8, profiles = '', '', []\n        for j in range(r):\n            jf = j*f\n            \n            # Residue convert from one-hot to decoded\n            residue_onehot = arr[i,jf+0:jf+22]\n            residue = residue_list[np.argmax(residue_onehot)]\n\n            # Q8 one-hot encoded to decoded structure symbol\n            residue_q8_onehot = arr[i,jf+22:jf+31]\n            residue_q8 = q8_list[np.argmax(residue_q8_onehot)]\n\n            if residue == 'NoSeq': break # terminating sequence symbol\n\n            nc_terminals = arr[i,jf+31:jf+33] # nc_terminals = [0. 0.]\n            sa = arr[i,jf+33:jf+35] # sa = [0. 0.]\n            profile = arr[i,jf+35:jf+57] # profile features\n            \n            seq += residue # concat residues into amino acid sequence\n            q8 += residue_q8 # concat secondary structure into secondary structure sequence\n            profiles.append(profile)\n        \n        data[i] = [str(i+1), len(seq), seq, np.array(profiles), q8]\n    \n    return pd.DataFrame(data, columns=columns)\n\n### Train-test Specification\ntrain_df = get_data(cb6133filtered)\ntest_df = get_data(cb513)\n\n# The custom accuracy metric used for this task (label 0 is the padding class and is masked out)\ndef accuracy(y_true, y_pred):\n    y = tf.argmax(y_true, axis=-1)\n    y_ = tf.argmax(y_pred, axis=-1)\n    mask = tf.greater(y, 0)\n    return K.cast(K.equal(tf.boolean_mask(y, mask), tf.boolean_mask(y_, mask)), K.floatx())\n\n# Decodes a one-hot encoded sequence back into its symbol string\ndef onehot_to_seq(oh_seq, index):\n    s = ''\n    for o in oh_seq:\n        i = np.argmax(o)\n        if i != 0:\n            s += index[i]\n        else:\n            break\n    return s\n\ndef seq2onehot(seq, n):\n    out = np.zeros((len(seq), maxlen_seq, n))\n    for i in range(len(seq)):\n        for j in range(maxlen_seq):\n            out[i, j, seq[i, j]] = 1\n    return out\n\n# Computes and returns the n-grams of a particular sequence (defaults to unigrams here, n = 1)\ndef seq2ngrams(seqs, n = 1):\n    return np.array([[seq[i : i + n] for i in range(len(seq))] for seq in seqs])\n\n# Loading and converting the inputs to n-grams\ntrain_input_seqs, train_target_seqs = \\\n    train_df[['input', 'expected']][(train_df.len.astype(int) <= maxlen_seq)].values.T\ntrain_input_grams = seq2ngrams(train_input_seqs)\n\n# Same for test\ntest_input_seqs = test_df['input'].values.T\ntest_input_grams = seq2ngrams(test_input_seqs)\n\n# Initializing and defining the tokenizer encoders and decoders based on the train set\ntokenizer_encoder = Tokenizer()\ntokenizer_encoder.fit_on_texts(train_input_grams)\ntokenizer_decoder = Tokenizer(char_level = True)\ntokenizer_decoder.fit_on_texts(train_target_seqs)\n\n# Using the tokenizer to encode and decode the sequences for use in training\n# Inputs\ntrain_input_data = tokenizer_encoder.texts_to_sequences(train_input_grams)\ntrain_input_data = sequence.pad_sequences(train_input_data,\n                                          maxlen = maxlen_seq, padding='post')\n\n# Targets\ntrain_target_data = tokenizer_decoder.texts_to_sequences(train_target_seqs)\ntrain_target_data = sequence.pad_sequences(train_target_data,\n                                           maxlen = maxlen_seq, padding='post')\ntrain_target_data = to_categorical(train_target_data)\n\n# Use the same tokenizer defined on train for tokenization of test\ntest_input_data = 
tokenizer_encoder.texts_to_sequences(test_input_grams)\ntest_input_data = sequence.pad_sequences(test_input_data,\n maxlen = maxlen_seq, padding='post')\n\n# Computing the number of words and number of tags for the keras model\nn_words = len(tokenizer_encoder.word_index) + 1\nn_tags = len(tokenizer_decoder.word_index) + 1\n\ntrain_input_data_alt = train_input_data\ntrain_input_data = seq2onehot(train_input_data, n_words)\ntrain_profiles = train_df.profiles.values\n\ntest_input_data_alt = test_input_data\ntest_input_data = seq2onehot(test_input_data, n_words)\ntest_profiles = test_df.profiles.values\n\ntrain_profiles_np = np.zeros((len(train_profiles), maxlen_seq, 22))\nfor i, profile in enumerate(train_profiles):\n for j in range(profile.shape[0]):\n for k in range(profile.shape[1]):\n train_profiles_np[i, j, k] = profile[j, k]\n\ntest_profiles_np = np.zeros((len(test_profiles), maxlen_seq, 22))\nfor i, profile in enumerate(test_profiles):\n for j in range(profile.shape[0]):\n for k in range(profile.shape[1]):\n test_profiles_np[i, j, k] = profile[j, k]\n\ndef decode_results(y_, reverse_decoder_index):\n print(\"prediction: \" + str(onehot_to_seq(y_, reverse_decoder_index).upper()))\n return str(onehot_to_seq(y_, reverse_decoder_index).upper())\n\ndef run_test(_model, data1, data2, data3, csv_name, npy_name):\n reverse_decoder_index = {value:key for key,value in tokenizer_decoder.word_index.items()}\n reverse_encoder_index = {value:key for key,value in tokenizer_encoder.word_index.items()}\n \n # Get predictions using our model\n y_test_pred = _model.predict([data1, data2, data3])\n\n decoded_y_pred = []\n for i in range(len(test_input_data)):\n res = decode_results(y_test_pred[i], reverse_decoder_index)\n decoded_y_pred.append(res)\n\n # Set Columns\n out_df = pd.DataFrame()\n out_df[\"id\"] = test_df.id.values\n out_df[\"expected\"] = decoded_y_pred\n\n # Save results\n with open(csv_name, \"w\") as f:\n out_df.to_csv(f, index=False)\n\n np.save(npy_name, y_test_pred)\n\ndef run_test_single_input(_model, data1, csv_name, npy_name):\n reverse_decoder_index = {value:key for key,value in tokenizer_decoder.word_index.items()}\n reverse_encoder_index = {value:key for key,value in tokenizer_encoder.word_index.items()}\n \n # Get predictions using our model\n y_test_pred = _model.predict(data1)\n\n decoded_y_pred = []\n for i in range(len(data1[:])):\n res = decode_results(y_test_pred[i], reverse_decoder_index)\n decoded_y_pred.append(res)\n\n # Set Columns\n out_df = pd.DataFrame()\n out_df[\"id\"] = test_df.id.values\n out_df[\"expected\"] = decoded_y_pred\n\n # Save results\n with open(csv_name, \"w\") as f:\n out_df.to_csv(f, index=False)\n\n np.save(npy_name, y_test_pred)\n\n # load ground truth\n gt_all = [line.strip().split(',')[3] for line in open('cb513test_solution.csv').readlines()]\n predictions = decoded_y_pred\n acc_list = []\n \n # calculating accuracy \n def get_acc(gt,pred):\n assert len(gt)== len(pred)\n correct = 0\n for i in range(len(gt)):\n if gt[i]==pred[i]:\n correct+=1\n \n return (1.0*correct)/len(gt)\n\n # compute accuracy\n for gt,pred in zip(gt_all,predictions):\n if len(gt) == len(pred):\n acc = get_acc(gt,pred)\n acc_list.append(acc)\n\n print ('mean accuracy is', np.mean(acc_list))\n\n\n\"\"\" Run below for a single run \"\"\"\ndef train(X_train, y_train, X_val=None, y_val=None):\n \"\"\"\n Define model and use this function for training\n \"\"\"\n model = create_CNN()\n assert(model is not None)\n model.compile(\n optimizer=Adam(lr=0.0002),\n loss = 
\"categorical_crossentropy\",\n metrics = [\"accuracy\", accuracy])\n \n if X_val is not None and y_val is not None:\n history = model.fit( X_train, y_train,\n batch_size = 128, epochs = 30,\n validation_data = (X_val, y_val))\n else:\n history = model.fit( X_train, y_train,\n batch_size = 128, epochs = 30)\n\n return history, model\n\n# plot diagnostic learning curves\ndef summarize_diagnostics(history):\n # plot loss\n pyplot.subplot(211)\n pyplot.title('Cross Entropy Loss')\n pyplot.plot(history.history['loss'], color='blue', label='train')\n pyplot.plot(history.history['val_loss'], color='orange', label='test')\n # plot accuracy\n pyplot.subplot(212)\n pyplot.title('Classification Accuracy')\n pyplot.plot(history.history['acc'], color='blue', label='train')\n pyplot.plot(history.history['val_acc'], color='orange', label='test')\n # save plot to file\n filename = 'model_diagn'\n pyplot.savefig(filename + '_plot.png')\n pyplot.close()\n\n##Antonis\ndef create_CNN():\n # def conv_block(inp,ind):\n # c1 = Conv1D(36,1,padding='same',activation='linear',name='c1_'+str(ind))(inp)\n # c3=Conv1D(64,3,padding='same',activation='linear',name='c3_'+str(ind))(inp)\n # c7=Conv1D(64,7,padding='same',activation='linear',name='c7_'+str(ind))(inp)\n # c9=Conv1D(64,9,padding='same',activation='linear',name='c9_'+str(ind))(inp)\n # conc = Concatenate(axis=-1,name='conc_'+str(ind))([c3,c7,c9])\n # bn = BatchNormalization(name='bn_'+str(ind))(conc)\n # drop = Dropout(0.4,name='drop_'+str(ind))(bn)\n # act = Activation('relu',name='relu_'+str(ind))(drop)\n \n # cc9 = Conv1D(27,9,padding='same',activation='linear',name='cc9_'+str(ind))(act)\n # bn2 = BatchNormalization(name='bn2_'+str(ind))(cc9)\n # drop2 = Dropout(0.4,name='drop2_'+str(ind))(bn2)\n # act2 = Activation('relu',name='relu2_'+str(ind))(drop2)\n \n # concat2 = Concatenate(axis=-1,name='conc2_'+str(ind))([c1,act,act2])\n # return concat2\n\n # def dense_block(inp,ind):\n # d1 = TimeDistributed(Dense(455,activation='linear',name='d_'+str(ind)))(inp)\n # bn = BatchNormalization(name='bnd_'+str(ind))(d1)\n # drop = Dropout(0.2,name='d_dropout_'+str(ind))(bn)\n # act = Activation('relu',name='relu_d_'+str(ind))(drop)\n # return act\n\n \n\n # c_ind=0\n # d_ind=0\n inp = Input((700,22))\n x = Conv1D(64,1,padding='same',activation='relu')(inp)\n outs = []\n for i in range(32):\n temp = Lambda(lambda x: x[ :, :, i*2:i*2+2])(x)\n outs.append(Conv1D(2,5,padding='same',activation='relu')(temp))\n conc = Concatenate()(outs)\n d = TimeDistributed(Dense(32, activation = 'relu'))(conc)\n o = TimeDistributed(Dense(9,activation='softmax'))(d)\n\n\n\n # inpp = inp\n\n # for i in range(n_super_blocks):\n # c1 = conv_block(inpp,c_ind)\n # c2 = conv_block(c1,c_ind+1)\n # c_ind+=2\n # inpp = dense_block(c2,d_ind)\n # d_ind+=1\n\n # o = TimeDistributed(Dense(9,activation='softmax'))(inpp)\n m = Model(inp,o)\n m.summary()\n return m\n\nprint(train_input_data.shape)\nprint(train_input_data_alt.shape)\nprint(train_profiles_np.shape)\nprint(train_target_data.shape)\n\nrandomize = np.arange(len(train_target_data))\nnp.random.shuffle(randomize)\n\ntrain_input_data = train_input_data[randomize]\ntrain_input_data_alt = train_input_data_alt[randomize]\ntrain_profiles_np = train_profiles_np[randomize]\ntrain_target_data = train_target_data[randomize]\n\nval_p = 0.2\nvn = int(val_p*train_target_data.shape[0])\n\n# # To use 3.3 Bidirectional GRU with convolutional blocks from paper (using a validation set) use:\n# X_train = [train_input_data[vn:,:,:], train_input_data_alt[vn:,:], 
train_profiles_np[vn:,:,:]]\n# y_train = train_target_data[vn:,:,:]\n# X_val = [train_input_data[:vn,:,:], train_input_data_alt[:vn,:], train_profiles_np[:vn,:,:]]\n# y_val = train_target_data[:vn,:,:]\n\n# # To use 3.3 Bidirectional GRU with convolutional blocks from paper (without a validation set) use:\n# X_train = [train_input_data, train_input_data_alt, train_profiles_np]\n# y_train = train_target_data\n# X_val = None\n# y_val = None\n\n# To use any other model with a simple one hot residue encoding (using a validation set) use:\nX_train = train_input_data[vn:,:,:]\ny_train = train_target_data[vn:,:,:]\nX_val = train_input_data[:vn,:,:]\ny_val = train_target_data[:vn,:,:]\nprint('X_train shape: ' + str(X_train.shape))\nprint('y_train shape: ' + str(y_train.shape))\nprint('X_val shape: ' + str(X_val.shape))\nprint('y_val shape: ' + str(y_val.shape))\n\n\nhistory, model = train(X_train, y_train, X_val=X_val, y_val=y_val)\n\n# Save the model as a JSON format\nmodel.save_weights(\"cb513_weights_1.h5\")\nwith open(\"model_tyt.json\", \"w\") as json_file:\n json_file.write(model.to_json())\n\n# Save training history for parsing\nwith open(\"history_tyt.pkl\", \"wb\") as hist_file:\n pickle.dump(history.history, hist_file)\n\n\n# Predict on test dataset and save the output (1 input model)\nrun_test_single_input(model,\n test_input_data[:],\n \"cb513_test_1.csv\", \"cb513_test_prob_1.npy\")\n\n# # Predict on test dataset and save the output (3 input model)\n# run_test(model,\n# test_input_data[:],\n# test_input_data_alt[:],\n# test_profiles_np[:],\n# \"cb513_test_1.csv\", \"cb513_test_prob_1.npy\")\n\"\"\" End single run \"\"\"\n\nsummarize_diagnostics(history)", "sub_path": "tiny_cnn/tiny_cnn.py", "file_name": "tiny_cnn.py", "file_ext": "py", "file_size_in_byte": 14055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.greater", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.backend.cast", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 91, "usage_type": "name"}, {"api_name": "keras.backend.equal", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.boolean_mask", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.backend.floatx", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 125, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 127, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 133, "usage_type": "call"}, {"api_name": 
"keras.preprocessing.sequence", "line_number": 133, "usage_type": "name"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 138, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 138, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 140, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 144, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 144, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 196, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 242, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "keras.models.Input", "line_number": 315, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 316, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 319, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 320, "usage_type": "call"}, {"api_name": "keras.layers.Concatenate", "line_number": 321, "usage_type": "call"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 322, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 322, "usage_type": "call"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 323, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 323, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 337, "usage_type": "call"}, {"api_name": 
"numpy.arange", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 347, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 389, "usage_type": "call"}]} +{"seq_id": "358581354", "text": "from selenium import webdriver\nimport time\n\nclass WhatsappBot:\n def __init__(self):\n self.message = \"Outro Teste\"\n self.contacts = [\"Rodrigo Maduro\"]\n options = webdriver.ChromeOptions()\n options.add_argument('lang=pt-br')\n self.driver = webdriver.Chrome(executable_path=r'./chromedriver')\n\n def SendMessages(self):\n #+55 11 94810-8855\n\t\t#
\n\t\t#\n self.driver.get('https://web.whatsapp.com/')\n time.sleep(30)\n\t\t\n for contact in self.contacts:\n contact = self.driver.find_element_by_xpath(f\"//span[@title='{contact}']\")\n time.sleep(3)\n contact.click()\n chat_box = self.driver.find_element_by_class_name('_13mgZ')\n time.sleep(3)\n chat_box.click()\n chat_box.send_keys(self.message)\n btn_send = self.driver.find_element_by_xpath(\"//span[@data-icon='send']\")\n time.sleep(3)\n btn_send.click()\n time.sleep(5)\n\n\nbot = WhatsappBot()\nbot.SendMessages()", "sub_path": "zapbot.py", "file_name": "zapbot.py", "file_ext": "py", "file_size_in_byte": 1167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 8, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 8, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "465434838", "text": "import re, json, requests, csv, random, math\nimport firebase_admin, os\nfrom firebase_admin import firestore\n\ndb = firestore.client()\n\ndef req(api_method, renew=False):\n print('requesting- ', api_method)\n try:\n if renew:\n raise\n\n with open(f'./cache/{api_method}.json', 'r') as cache:\n return json.loads(cache.read())\n except:\n response = requests.get(f'https://codeforces.com/api/{api_method}')\n return json.loads(response.text)\n\nuser = {}\n\ndef getCfContestInfo(arr):\n docs = db.collection(u'profiles').stream()\n\n shandles=''\n handles=[]\n f= 1\n for doc in docs:\n\n D = doc.to_dict()\n\n user[D['cf']] = D['username']\n user[D['vj']] = D['username']\n user['username'] = [D['cf'], D['vj']]\n\n if f:\n f+=1\n shandles = doc.to_dict()['cf']\n else:\n shandles= shandles + ';'+ doc.to_dict()['cf']\n\n handles.append(doc.to_dict()['cf'])\n\n # db.child('contests').child('codeforces').remove()\n\n if not arr: return {}\n\n cf = {}\n cfInfo = {}\n\n for i in handles:\n cf.update({i : i})\n\n for each in arr:\n row = req(f'contest.standings?contestId={each}&handles={shandles}', False)\n\n if row['status'] != 'OK': continue\n\n for i in row['result']['rows']:\n data={\n cf[i['party']['members'][0]['handle']] :{\n 'solved': int(i['points']),\n 'penalty': int(i['penalty'])*60,\n 'person' : cf[i['party']['members'][0]['handle']],\n # 'rank' : i['rank']\n }\n }\n\n if cf[i['party']['members'][0]['handle']] in cfInfo:\n cfInfo[cf[i['party']['members'][0]['handle']]]['solved']+= data[cf[i['party']['members'][0]['handle']]]['solved']\n cfInfo[cf[i['party']['members'][0]['handle']]]['penalty']+= data[cf[i['party']['members'][0]['handle']]]['penalty']\n\n else: cfInfo.update(data)\n\n return cfInfo\n\nvjudge = {}\n\ndef generateVjudgeList():\n docs = db.collection('vjudgeContests').stream()\n\n for doc in docs:\n data = doc.to_dict()\n\n ins = []\n for each in data['ranks']:\n if each['vj'] in user:\n ins.append(\n {\n 'person': user[each['vj']],\n 'solved': each['solved'],\n 'penalty' : each['penalty']\n }\n )\n vjudge.update({\n str(doc.id) : ins\n })\n\ndef 
generateFavorite(vjcontests, cfcontests):\n\n data = getCfContestInfo(cfcontests)\n generateVjudgeList()\n\n for each in vjcontests:\n if str(each) not in vjudge: continue\n genVjList = vjudge[str(each)]\n\n for i in genVjList:\n tmpdata={\n i['person'] : {\n 'solved': i['solved'],\n 'penalty': i['penalty']*60,\n 'person' : i['person']\n }\n }\n\n if i['person'] in data:\n data[i['person']]['solved'] += i['solved']\n data[i['person']]['penalty'] += i['penalty']\n\n else: data.update(tmpdata)\n \n datas = []\n for i in data:\n data[i]\n datas.append(data[i])\n\n datas = sorted(datas, key = lambda i: (i['solved'], -i['penalty']),reverse = True)\n \n for i in range(len(datas)):\n datas[i].update({'position': i+1})\n\n print(datas)\n\n return datas\n\n# generateFavorite([376797],None)\n\ndef generateContestPerformance(contestId):\n docs = db.collection(u'profiles').stream()\n\n shandles=''\n handles=[]\n\n USERNM = {}\n\n f=False\n for doc in docs:\n handle = doc.to_dict()['cf']\n shandles= shandles + ';' + handle if f else '' + handle\n f=True\n USERNM.update({handle : doc.id})\n handles.append(handle)\n\n cf = {}\n\n for i in handles:\n cf.update({i : i})\n\n print(cf)\n\n contestant = []\n\n rows = req(f'contest.standings?contestId={contestId}&handles={shandles}', False)\n\n numOfProblems = len(rows['result']['problems'])\n\n for i in rows['result']['rows']:\n problems = [0 for x in range(numOfProblems)]\n solved = [0 for x in range(numOfProblems)]\n \n ind=0\n for j in i['problemResults']:\n if j['points']:\n solved[ind] = 1\n problems[ind]=(j['bestSubmissionTimeSeconds'] + j['rejectedAttemptCount']*20)\n ind+=1\n \n data = {\n 'id' : cf[i['party']['members'][0]['handle']],\n 'solve' : solved,\n 'penalty' : problems\n }\n\n contestant.append(data)\n \n n= len(contestant)\n\n dummy = {\n 'id' : 0,\n 'penalty' : [0 for i in range(numOfProblems)],\n 'solve' : [0 for i in range(numOfProblems)]\n }\n\n res= {}\n\n for i in range(numOfProblems):\n for j in range(n): dummy['solve'][i] |= contestant[j]['solve'][i]\n if dummy['solve'][i] == 0: continue\n\n dummy['penalty'][i] = 1000000000\n\n for j in range(n):\n if contestant[j]['solve'][i]:\n dummy['penalty'][i] = min(contestant[j]['penalty'][i], dummy['penalty'][i])\n\n for i in range(n):\n total_solve=0\n total_penalty=0\n b_sol=0\n b_pen=0\n\n for j in range(numOfProblems):\n b_sol+= dummy['solve'][j]\n\n if contestant[i]['solve'][j] == 0: continue\n b_pen += dummy['penalty'][j]\n total_solve+=1\n total_penalty+=contestant[i]['penalty'][j]\n\n data = {\n contestant[i]['id']:{\n 'id' : USERNM[contestant[i]['id']],\n 'capability': (total_solve / b_sol) * 100.00,\n 'time': (total_penalty / b_pen) * 100.00\n }\n }\n res.update(data)\n\n finalres= []\n\n for i in res:\n finalres.append({\n 'id': res[i]['id'],\n 'capability':res[i]['capability'],\n 'time':res[i]['time']\n })\n\n finalres = sorted(finalres, key = lambda i: (i['capability'], -i['time']),reverse = True)\n\n print(finalres)\n \n return finalres\n\n# generateContestPerformance(1400)\n\ndef generateContestPerformanceCombined(contest, weight):\n data = {}\n sz = len(contest)\n\n sum=0\n\n for i in range(sz):\n getOne = generateContestPerformance(contest[i]) # id, capability, time\n sum+=weight[i]\n for j in getOne:\n if j['id'] in data:\n data[j['id']]['capability']+= j['capability']*weight[i]\n data[j['id']]['time']+= j['time']*weight[i]\n\n else:\n tmp = {\n j['id'] : {\n 'id' : j['id'],\n 'capability' : j['capability']*weight[i],\n 'time' : j['time']*weight[i]\n }\n }\n\n 
data.update(tmp)\n\n # print(sum)\n\n for i in data:\n data[i]['capability']=round(data[i]['capability']/sum,2)\n data[i]['time']=round(data[i]['time']/sum, 2)\n \n # print(data)\n\n ret = []\n\n for i in data:\n ret.append(data[i])\n\n ret = sorted(ret, key = lambda i: (i['capability'], -i['time']),reverse = True)\n \n return ret\n\n# generateContestPerformanceCombined([1303,1295],[50,32])\n# generateFavorite([356330],[1303])\n# generateFavorite([],[1303])\n\ndef eloprobablity(ra,rb):\n return 1.00/(1+pow(10.0, (rb-ra)/1000.00))\n \ndef getSeed(rating,currentRating,pos,n):\n ret=1.00\n for i in range(n):\n if i!=pos:\n ret+=eloprobablity(currentRating[i], rating)\n return ret\n \ndef getRating(rank,currentRating,pos,n):\n tmpgeomean=math.sqrt(rank[pos]*getSeed(currentRating[pos],currentRating,pos,n))\n lo=1\n hi=8000\n while hi-lo>1:\n mid=int(hi+lo)/2\n if getSeed(mid,currentRating,pos,n) List[Assignment]:\n \"\"\"\n Returns a list of assignments sorted in ascending order by their IDs\n \"\"\"\n return sorted(self.__assignmentRepository.getItems(), key=lambda assignment: assignment.getAssignmentId())\n\n def addAssignment(self, assignmentId: int, description: str, deadline: datetime.date) -> Assignment:\n \"\"\"\n Adds an assignment to the repository\n \"\"\"\n assignment = Assignment(assignmentId, description, deadline)\n AssignmentValidator.validateAssignment(assignment)\n self.__assignmentRepository.addItem(assignment)\n self.__changesStack.addChange(ChangesStack.ItemAdded(assignment), newCommit=True)\n\n return assignment\n\n def removeAssignment(self, assignmentId: int):\n \"\"\"\n Removes an assignment from the repository\n \"\"\"\n assignment = self.findAssignment(assignmentId)\n self.__assignmentRepository.deleteItem(assignment)\n self.__changesStack.beginCommit()\n self.__changesStack.addChange(ChangesStack.ItemRemoved(assignment))\n if self.__deleteCallback is not None:\n self.__deleteCallback(assignment)\n else:\n self.__changesStack.endCommit()\n\n def findAssignment(self, assignmentId: int) -> Assignment:\n \"\"\"\n Searches an assignment and returns it if found. 
Raises InvalidAssignmentId otherwise\n \"\"\"\n assignment = Assignment(assignmentId)\n foundAssignment = self.__assignmentRepository.getItem(assignment)\n if foundAssignment is None:\n raise AssignmentIdNotFound\n return foundAssignment\n\n def updateAssignment(self, assignmentId: int, description: str, deadline: datetime.date):\n \"\"\"\n Updates the assignment data\n \"\"\"\n assignment = self.findAssignment(assignmentId)\n newAssignment = copy(assignment)\n newAssignment.setDescription(description)\n newAssignment.setDeadline(deadline)\n AssignmentValidator.validateAssignment(newAssignment)\n self.__assignmentRepository.updateItem(newAssignment)\n\n self.__changesStack.beginCommit()\n self.__changesStack.addChange(ChangesStack.ItemRemoved(assignment))\n self.__changesStack.addChange(ChangesStack.ItemAdded(newAssignment))\n self.__changesStack.endCommit()\n\n def addRandomAssignments(self, number):\n descriptionTitles = [\n \"project\",\n \"documentary\",\n \"study\",\n ]\n descriptionSubjects = [\n \"importance\",\n \"problem\",\n \"execution\",\n \"reuse\",\n \"toxicity\",\n \"revolution\",\n \"discovery\",\n \"superiority\",\n \"union\",\n \"replication\"\n ]\n descriptionAdjectives = [\n \"dumb\",\n \"dark\",\n \"unused\",\n \"unseen\",\n \"reheated\",\n \"purple\",\n \"the chosen\",\n \"fast\",\n \"stupid\",\n \"left-handed\",\n \"drunk\",\n \"smart-ass\"\n ]\n descriptionNouns = [\n \"programmers\",\n \"memes\",\n \"weed\",\n \"meals\",\n \"chemistry\",\n \"doors\",\n \"birds\",\n \"cars\",\n \"PCs\",\n \"floppy disks\",\n \"refrigerators\",\n \"ice\",\n \"mountain trip\",\n \"stone age\",\n \"underground cavern\",\n \"board games\",\n \"drawings\"\n ]\n\n for i in range(number):\n descriptionTitle = random.choice(descriptionTitles)\n descriptionSubject = random.choice(descriptionSubjects)\n descriptionAdjective = random.choice(descriptionAdjectives)\n descriptionNoun = random.choice(descriptionNouns)\n description = \"A \" + descriptionTitle + \" about the \" + descriptionSubject + \" of \" + descriptionAdjective + \\\n \" \" + descriptionNoun\n\n assignmentDate = self.randomDate(datetime.date(2018, 1, 1), datetime.date(2020, 1, 1))\n self.addAssignment(i, description, assignmentDate)\n\n @staticmethod\n def randomDate(start, end):\n \"\"\"\n Generate a random datetime between `start` and `end`\n \"\"\"\n return start + datetime.timedelta(\n # Get a random amount of seconds between `start` and `end`\n seconds=random.randint(0, int((end - start).total_seconds())),\n )\n", "sub_path": "Assignment08/logic/AssignmentController.py", "file_name": "AssignmentController.py", "file_ext": "py", "file_size_in_byte": 5199, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "repository.Repository.Repository", "line_number": 16, "usage_type": "name"}, {"api_name": "logic.ChangesStack.ChangesStack", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "model.Assignment.Assignment", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 27, "usage_type": "attribute"}, {"api_name": "model.Assignment.Assignment", "line_number": 31, "usage_type": "call"}, {"api_name": "model.Validators.AssignmentValidator.validateAssignment", "line_number": 32, "usage_type": "call"}, {"api_name": "model.Validators.AssignmentValidator", "line_number": 32, "usage_type": "name"}, {"api_name": "logic.ChangesStack.ChangesStack.ItemAdded", 
"line_number": 34, "usage_type": "call"}, {"api_name": "logic.ChangesStack.ChangesStack", "line_number": 34, "usage_type": "name"}, {"api_name": "model.Assignment.Assignment", "line_number": 27, "usage_type": "name"}, {"api_name": "logic.ChangesStack.ChangesStack.ItemRemoved", "line_number": 45, "usage_type": "call"}, {"api_name": "logic.ChangesStack.ChangesStack", "line_number": 45, "usage_type": "name"}, {"api_name": "model.Assignment.Assignment", "line_number": 55, "usage_type": "call"}, {"api_name": "logic.ControllerError.AssignmentIdNotFound", "line_number": 58, "usage_type": "name"}, {"api_name": "model.Assignment.Assignment", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 61, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 66, "usage_type": "call"}, {"api_name": "model.Validators.AssignmentValidator.validateAssignment", "line_number": 69, "usage_type": "call"}, {"api_name": "model.Validators.AssignmentValidator", "line_number": 69, "usage_type": "name"}, {"api_name": "logic.ChangesStack.ChangesStack.ItemRemoved", "line_number": 73, "usage_type": "call"}, {"api_name": "logic.ChangesStack.ChangesStack", "line_number": 73, "usage_type": "name"}, {"api_name": "logic.ChangesStack.ChangesStack.ItemAdded", "line_number": 74, "usage_type": "call"}, {"api_name": "logic.ChangesStack.ChangesStack", "line_number": 74, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 130, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 131, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 132, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 145, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "64880047", "text": "## Name: ServiceInfo.py\n## \n## Purpose: Reads an ArcGIS Server Cached Map Service REST end point and \n## extracts TileInfo, Columns, Rows Origin X, Origin Y and Full Extent\n## using Urllib2 to get to the URL and JSON to interpret and read the \n## ArcGIS Server Java Script Object Notation.\n## \n## Author: Tom Brenneman, Esri\n## \n## Date: Thursday, September 10, 2009\n## \n## Version: Python 2.6.5 (r265:79096, Mar 19 2010, 21:48:26) [MSC v.1500 32 bit (Intel)] on win32]\n## \n## Copyright 2001-2010 ESRI. \n## All rights reserved under the copyright laws of the United States. \n## You may freely redistribute and use this sample code, with or without \n## modification. The sample code is provided without any technical support or \n## updates. \n## \n## Disclaimer OF Warranty: THE SAMPLE CODE IS PROVIDED \"AS IS\" AND ANY EXPRESS OR \n## IMPLIED WARRANTIES, INCLUDING THE IMPLIED WARRANTIES OF MERCHANTABILITY \n## FITNESS FOR A PARTICULAR PURPOSE, OR NONINFRINGEMENT ARE DISCLAIMED. IN NO \n## EVENT SHALL ESRI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, \n## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \n## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR \n## PROFITS; OR BUSINESS INTERRUPTION) SUSTAINED BY YOU OR A THIRD PARTY, HOWEVER \n## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \n## OR TORT ARISING IN ANY WAY OUT OF THE USE OF THIS SAMPLE CODE, EVEN IF ADVISED \n## OF THE POSSIBILITY OF SUCH DAMAGE. 
THESE LIMITATIONS SHALL APPLY \n## NOTWITHSTANDING ANY FAILURE OF ESSENTIAL PURPOSE OF ANY LIMITED REMEDY. \n## \n## For additional information contact: \n## \n## Environmental Systems Research Institute, Inc. \n## Attn: Contracts Dept. \n## 380 New York Street \n## Redlands, California, U.S.A. 92373 \n## Email: contracts@esri.com \n##*********************************************************************** \nimport math\n\ndef getCacheInfo(URL, datasetExtent=''):\n import urllib2, json \n# from StringIO import StringIO\n \n serviceURL = URL + \"?f=json\"\n req = urllib2.Request(url=serviceURL)\n f = urllib2.urlopen(req)\n jsonRespons = f.read()\n serviceInfo = json.loads(jsonRespons)\n \n tileInfo = serviceInfo['tileInfo']\n tilePixelHeigh = tileInfo['cols']\n tilePixelWidth = tileInfo['rows']\n tileOriginX = tileInfo['origin']['x']\n tileOriginY = tileInfo['origin']['y']\n fullExtent = serviceInfo['fullExtent']\n \n levels = {}\n for lod in tileInfo['lods']:\n res = lod['resolution']\n scale = lod['scale']\n tileGroundWidth = tilePixelWidth * res\n tileGroundHeight = tilePixelHeigh * res\n if datasetExtent != '':\n xMin = tileOriginX-datasetExtent.XMin\n yMin = tileOriginY-datasetExtent.YMin\n xMax = tileOriginX-datasetExtent.XMax\n yMax = tileOriginY-datasetExtent.YMax\n else:\n xMin = tileOriginX-fullExtent['xmin']\n yMin = tileOriginY-fullExtent['ymin']\n xMax = tileOriginX-fullExtent['xmax']\n yMax = tileOriginY-fullExtent['ymax']\n startTileRow = abs(math.trunc((yMax)/tileGroundHeight))\n endTileRow = abs(math.trunc((yMin)/tileGroundHeight))\n startTileCol = abs(math.trunc((xMin)/tileGroundWidth))\n endTileCol = abs(math.trunc((xMax)/tileGroundWidth))\n \n levels[lod['level']] = {'scale': scale, 'resolution': res, \n 'tileGroundWidth': tileGroundWidth, \n 'tileGroundHeight': tileGroundHeight,\n 'startTileRow': startTileRow,\n 'endTileRow': endTileRow,\n 'startTileCol': startTileCol,\n 'endTileCol': endTileCol}\n cacheInfo = {'spatialReference': tileInfo['spatialReference'], 'fullExtent': fullExtent, 'originX': tileOriginX,'originY': tileOriginY, 'tileHeight': tileInfo['cols'], 'tileWidth': tileInfo['rows'], 'levels': levels }\n return cacheInfo\n", "sub_path": "CreateServerTilingScheme/serviceInfo.py", "file_name": "serviceInfo.py", "file_ext": "py", "file_size_in_byte": 4148, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "urllib2.Request", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 48, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "math.trunc", "line_number": 75, "usage_type": "call"}, {"api_name": "math.trunc", "line_number": 76, "usage_type": "call"}, {"api_name": "math.trunc", "line_number": 77, "usage_type": "call"}, {"api_name": "math.trunc", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "538227505", "text": "\"\"\"\nImage2Seq Model with Attention Pipeline\nTrain-Validation Script\n\"\"\"\n#!/usr/bin/env python3\n#############################################################################\n# Imports #\n#############################################################################\n\n# Standard imports ##########################################################\nimport absl.logging\nimport argparse\nimport csv\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport os \nfrom six import raise_from\nfrom sklearn.model_selection import train_test_split\nfrom 
sklearn.utils import shuffle\nimport sys \nfrom tqdm import tqdm\n\n# Stop pycache\nsys.dont_write_bytecode = True\n\n# GPU setup #################################################################\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\n# Keras and tensorflow imports ##############################################\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\nimport tensorflow.keras as keras\nimport tensorflow.keras.backend as K\n\n# Local imports #############################################################\n# Allow relative imports when being executed as script.\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\nimport image2seq\n__package__ = \"image2seq\"\n\nfrom image2seq.preprocessing.token_preprocessing \\\n import token_preprocessing, matrix_shape_preprocessing, \\\n detected_values_preprocessing\nfrom image2seq.models.eda_xu import EDAXU\nfrom image2seq.models.eda_xu_mlp import EDAXUMLP \nfrom image2seq.models.eda_xu_mlp_no_lstm2mlp import EDAXUMLPNOLSTM2MLP\nfrom image2seq.models.eda_xu_mlp_exp_loss import EDAXUMLPEXPLOSS\nfrom image2seq.models.drake_concat import DRAKECONCAT\nfrom image2seq.models.drake_concat_mlp import DRAKECONCATMLP\nfrom image2seq.models.drake_detections2 import DRAKEDETECTIONS2\nfrom image2seq.models.drake_parallel import DRAKEPARALLEL\n\n# Logging options ##########################################################'\nlogging.root.removeHandler(absl.logging._absl_handler)\nabsl.logging._warn_preinit_stderr=False\ndate = pd.datetime.now().date()\nhour = pd.datetime.now().hour\nminute = pd.datetime.now().minute\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',\n filename=\"image2seq/logs/train_log_{}_{}{}.txt\"\n .format(date, hour, minute))\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter(\n '%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger(\"\").addHandler(console)\n\n#############################################################################\n# Model Setup #\n#############################################################################\nlogging.info(\"MODEL SETUP - Tensorflow version\".format(tf.__version__))\nlogging.info(\"MODEL SETUP - Training Script - train_full.py\")\nfrom tensorflow.python.client import device_lib\nlogging.info(\"MODEL SETUP - CUDA VISIBLE DEVICES {}\"\n .format(device_lib.list_local_devices()))\ntf.compat.v1.debugging.assert_equal(True, tf.test.is_gpu_available())\ntf.compat.v1.debugging.assert_equal(True, tf.test.is_built_with_cuda())\n\nimage2seq = EDAXUMLP()\nlogging.info(\"MODEL SETUP - image2seq model {} instantiated\"\n .format(image2seq.get_model_name()))\nlogging.info(\"MODEL SETUP - log file = image2seq/logs/train_log_{}_{}{}.txt\"\n .format(date, hour, minute))\n\n# Parameter options #########################################################\n# CSV file of images to import\n# images_seqs_csv = \"/test_data/stage2_data_train/stage2_train.txt\"\nimages_seqs_csv = \"/stage2_data_train_1x1_demo/stage2_train.txt\"\ntrain_info_csv = \"/stage2_data_train_1x1_demo/stage2_train.txt\"\n\n# Data config\nbatch_size = 64\nlogging.info(\"MODEL SETUP - Batch size {}\".format(batch_size))\n\n# Optimizer selection\noptimizer = tf.compat.v1.train.AdadeltaOptimizer()\n\n# Training loop\nnum_epochs = 30\nlogging.info(\"MODEL SETUP - Number of epochs {}\".format(num_epochs))\n\n# 
Checkpointing #############################################################\ncheckpoint_directory = \\\n \"./image2seq/checkpoints/train/{}_{}_{}{}\"\\\n .format(image2seq.get_model_name(), date, hour, minute)\n# checkpoint_directory = \\\n# \"./image2seq/checkpoints/train/eda_xu_mlp_no_lstm2mlp_2019-08-17_945\"\n\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer,\n model=image2seq)\ncheckpoint_manager = tf.train.CheckpointManager(checkpoint,\n checkpoint_directory,\n max_to_keep=1)\n# Don't load latest checkpoint because usually want to start from scratch\n# status = checkpoint.restore(checkpoint_manager.latest_checkpoint)\n\n# Output file ###############################################################\nresults_file = \"image2seq/checkpoints/train/{}_{}_{}{}/results.txt\"\\\n .format(image2seq.get_model_name(), date, hour, minute)\n# results_file = checkpoint_directory + \"/results.txt\"\npredictions_file = \"image2seq/checkpoints/train/{}_{}_{}{}/predictions.txt\"\\\n .format(image2seq.get_model_name(), date, hour, minute)\n# predictions_file = checkpoint_directory + \"/predictions.txt\"\nimage2seq.set_predictions_file(predictions_file)\n\n#############################################################################\n# Pre-processing #\n#############################################################################\n# STEP 1: Pre-process token #################################################\nlist_image_paths, list_processed_matrix_seqs = \\\n token_preprocessing(images_seqs_csv, \n batch_size=batch_size, \n skip_padding=True)\n\nlen_list_image_paths = len(list_image_paths)\n\n_, list_matrix_shapes = \\\n detected_values_preprocessing(train_info_csv,\n batch_size=batch_size)\n\nlogging.info(\"PREPROCESSING - Step 1 - Token preprocessing\")\ntf.compat.v1.debugging.assert_equal(len(_), len_list_image_paths)\ntf.compat.v1.debugging.assert_equal(len(list_matrix_shapes), \n len(list_processed_matrix_seqs))\ntf.compat.v1.debugging.assert_equal(len(_), len(list_matrix_shapes))\n\n\n# STEP 2: Train-validation split ############################################\nshuffled_image_paths, shuffled_matrix_seqs, shuffled_matrix_shapes = \\\n shuffle(list_image_paths, \n list_processed_matrix_seqs, \n list_matrix_shapes, \n random_state=1)\n\nimg_name_train, img_name_val, seq_train, seq_val, matrix_shapes_train, \\\n matrix_shapes_val = train_test_split(shuffled_image_paths, \n shuffled_matrix_seqs, \n shuffled_matrix_shapes,\n test_size=0.5,\n random_state=0)\n\nlogging.info(\"PREPROCESSING - Step 2 - Train test split -\"\n \"Train examples {} Validation examples {}\"\n .format(len(img_name_train), len(img_name_val)))\ntf.compat.v1.debugging.assert_equal(len(img_name_train), len(seq_train))\ntf.compat.v1.debugging.assert_equal(len(img_name_val), len(seq_val))\ntf.compat.v1.debugging.assert_equal(len(matrix_shapes_train), len(seq_train))\ntf.compat.v1.debugging.assert_equal(len(matrix_shapes_val), len(seq_val))\n\n# STEP 3: Sort images and tokens by length ##################################\nsorted_seq_train, sorted_img_name_train, sorted_matrix_shapes_train = \\\n sorted((seq_train, img_name_train, matrix_shapes_train), key=len)\n\nsorted_seq_val, sorted_img_name_val, sorted_matrix_shapes_val = \\\n sorted((seq_val, img_name_val, matrix_shapes_val), key=len)\n\n# STEP 4: Create tf dataset from generator ##################################\ndef train_generator():\n for x, y, z in zip(sorted_img_name_train, \n sorted_seq_train, \n sorted_matrix_shapes_train):\n yield (x, y, z)\n\ndef val_generator():\n 
for x, y, z in zip(sorted_img_name_val, \n sorted_seq_val,\n sorted_matrix_shapes_val):\n yield (x, y, z)\n\ntrain_dataset = \\\n tf.data.Dataset.from_generator(\n generator=train_generator,\n output_types=(tf.string, tf.int32, tf.int32))\n\nvalidation_dataset = \\\n tf.data.Dataset.from_generator(\n generator=val_generator,\n output_types=(tf.string, tf.int32, tf.int32))\n\n# STEP 5: Load images #######################################################\ndef load_image(image_path, seq, matrix_shapes):\n \"\"\"\n Load image from image_path resizing it to match inputs required for \n InceptionV3 - notably width and height of 299 pixels\n \"\"\"\n img = tf.io.read_file(image_path)\n img = tf.image.decode_png(img, channels=3)\n img = tf.image.resize(img, (299, 299))\n img = tf.keras.applications.inception_v3.preprocess_input(img)\n return img, seq, matrix_shapes\n\ntrain_dataset = train_dataset.map(\n lambda item1, item2, item3:\n tf.numpy_function(load_image, \n [item1, item2, item3],\n [tf.float32, tf.int32, tf.int32]),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\nvalidation_dataset = validation_dataset.map(\n lambda item1, item2, item3:\n tf.numpy_function(load_image, \n [item1, item2, item3],\n [tf.float32, tf.int32, tf.int32]),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\nlogging.info(\"PREPROCESSING - Step 3 - Images processed\")\n\n# STEP 6: Pad by batch ######################################################\ntrain_dataset = train_dataset.padded_batch(\n batch_size,\n padded_shapes=([None, None, None], [None], [None]))\ntrain_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\nvalidation_dataset = validation_dataset.padded_batch(\n batch_size,\n padded_shapes=([None, None, None], [None], [None]))\nvalidation_dataset = \\\n validation_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n#############################################################################\n# Train-validation loop #\n#############################################################################\nlist_train_epoch_losses = []\nlist_val_epoch_losses = []\nlist_val_edit_distance = []\n\nfor epoch in range(num_epochs):\n ###########################################################################\n # Train loop #\n ###########################################################################\n train_epoch_loss = 0\n train_num_batches = 0\n pbar = tqdm(total=len(seq_train))\n one_percent_progress = len(seq_train) / 1000\n num_images_processed = 0\n logging.info(\"TRAINING - Epoch {} Model {}\"\n .format(epoch, image2seq.get_model_name()))\n\n for (train_batch, (train_img, train_target, train_detections)) \\\n in enumerate(train_dataset):\n # Train Batch ###########################################################\n train_batch = train_batch + 1\n with tf.GradientTape() as tape:\n train_batch_loss, _ = image2seq([train_img, \n train_target, \n train_detections],\n dropout=False)\n\n # Logging, Debug & Assert ###############################################\n # There are 206 trainable variables = \n # image encoder 190 + token embedding 1 + attention 6 + mlp 4 + lstm 3\n # + detections2hidden 2\n tf.compat.v1.debugging.assert_equal(\n 201,\n len(image2seq.get_no_lstm_variables()))\n\n # Calculate gradients ###################################################\n gradients = tape.gradient(train_batch_loss, \n image2seq.get_no_lstm_variables())\n \n # Apply gradients #######################################################\n optimizer.apply_gradients(\n grads_and_vars=zip(gradients, 
image2seq.get_no_lstm_variables()))\n\n # Update epoch statistics ###############################################\n train_epoch_loss += train_batch_loss\n train_num_batches = train_num_batches + 1\n\n if train_batch * batch_size - num_images_processed > \\\n one_percent_progress:\n rolling_mean = float(train_epoch_loss) / float(train_num_batches)\n pbar.set_description(\"TRAINING - Epoch {} Batch {} Rolling mean \"\n \"batch loss {}\"\n .format(epoch, train_batch, rolling_mean))\n pbar.update(train_batch * batch_size - num_images_processed)\n num_images_processed = train_batch * batch_size\n\n # End of epoch train statistics ###########################################\n mean_train_epoch_loss = float(train_epoch_loss) / float(train_num_batches)\n list_train_epoch_losses.append(mean_train_epoch_loss)\n pbar.close()\n\n logging.info(\"TRAINING - Epoch {}: Epoch mean losses = {}\"\n .format(epoch, mean_train_epoch_loss))\n \n ###########################################################################\n # Validation loop #\n ###########################################################################\n val_epoch_loss = 0\n val_num_batches = 0\n val_epoch_edit_distance = 0\n \n for (val_batch, (val_img, val_target, val_detections)) \\\n in enumerate(validation_dataset):\n # Validate batch ########################################################\n val_batch_loss, val_batch_edit_distance = image2seq([val_img, \n val_target,\n val_detections], \n val_mode=True)\n logging.debug(\"VALIDATION - Epoch {} Batch {} Batch loss {}\"\n .format(epoch, val_batch, val_batch_loss))\n\n # Update epoch statistics ###############################################\n val_epoch_loss += val_batch_loss\n val_epoch_edit_distance += val_batch_edit_distance\n val_num_batches = val_num_batches + 1\n \n # End of epoch validation statistics ######################################\n mean_val_epoch_loss = float(val_epoch_loss) / float(val_num_batches)\n mean_val_edit_distance = float(val_batch_edit_distance) / \\\n float(val_num_batches)\n list_val_epoch_losses.append(mean_val_epoch_loss)\n list_val_edit_distance.append(mean_val_edit_distance)\n\n logging.info(\"VALIDATION - Epoch {}: Epoch mean losses = {}\"\n .format(epoch, mean_val_epoch_loss))\n logging.info(\"VALIDATION - Epoch {}: Epoch mean edit distance = {}\"\n .format(epoch, mean_val_edit_distance))\n\n # Save checkpoint #########################################################\n if mean_val_epoch_loss == min(list_val_epoch_losses):\n logging.info(\"VALIDATION - Save checkpoint because {} loss is minimum\"\n .format(mean_val_epoch_loss))\n checkpoint_manager.save()\n else:\n logging.info(\"VALIDATION - Do not save checkpoint because {} loss is \"\n \"greater than minimum loss of {}\"\n .format(mean_val_epoch_loss, min(list_val_epoch_losses)))\n\n ###########################################################################\n # Epoch results #\n ###########################################################################\n with open(results_file, \"a+\") as rf:\n rf.write(\"{},{},{},{}\\n\"\\\n .format(epoch, \n mean_train_epoch_loss, \n mean_val_epoch_loss,\n mean_val_edit_distance))\n \n# Training results ##########################################################\nlogging.info(\"TRAINING - Finished - Losses \\n{}\"\n .format(list_train_epoch_losses))\nlogging.info(\"VALIDATION - Finished - Losses \\n{}\"\n .format(list_val_epoch_losses))\nlogging.info(\"VALIDATION - Finished - Edit Distances \\n{}\"\n .format(list_val_edit_distance))", "sub_path": "run/train_full.py", 
"file_name": "train_full.py", "file_ext": "py", "file_size_in_byte": 16125, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.dont_write_bytecode", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.enable_eager_execution", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.root.removeHandler", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.root", "line_number": 57, "usage_type": "attribute"}, {"api_name": "absl.logging.logging", "line_number": 57, "usage_type": "attribute"}, {"api_name": "absl.logging", "line_number": 57, "usage_type": "name"}, {"api_name": "absl.logging.logging", "line_number": 58, "usage_type": "attribute"}, {"api_name": "absl.logging", "line_number": 58, "usage_type": "name"}, {"api_name": "pandas.datetime.now", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.datetime", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pandas.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pandas.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.datetime", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 62, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 63, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 67, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 68, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.__version__", "line_number": 77, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 78, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.python.client.device_lib.list_local_devices", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.python.client.device_lib", "line_number": 81, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.test.is_gpu_available", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.test.is_built_with_cuda", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 
83, "usage_type": "attribute"}, {"api_name": "image2seq.models.eda_xu_mlp.EDAXUMLP", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "image2seq.get_model_name", "line_number": 87, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train.AdadeltaOptimizer", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 102, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 106, "usage_type": "call"}, {"api_name": "image2seq.get_model_name", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.train.Checkpoint", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.train.CheckpointManager", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 117, "usage_type": "attribute"}, {"api_name": "image2seq.get_model_name", "line_number": 125, "usage_type": "call"}, {"api_name": "image2seq.get_model_name", "line_number": 128, "usage_type": "call"}, {"api_name": "image2seq.set_predictions_file", "line_number": 130, "usage_type": "call"}, {"api_name": "image2seq.preprocessing.token_preprocessing.token_preprocessing", "line_number": 137, "usage_type": "call"}, {"api_name": "image2seq.preprocessing.token_preprocessing.detected_values_preprocessing", "line_number": 144, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 148, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 149, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 151, "usage_type": "attribute"}, {"api_name": "sklearn.utils.shuffle", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 162, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 171, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 171, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 173, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 197, "usage_type": "attribute"}, {"api_name": "tensorflow.string", "line_number": 199, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 199, 
"usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 202, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 202, "usage_type": "attribute"}, {"api_name": "tensorflow.string", "line_number": 204, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 204, "usage_type": "attribute"}, {"api_name": "tensorflow.io.read_file", "line_number": 212, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 212, "usage_type": "attribute"}, {"api_name": "tensorflow.image.decode_png", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 213, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 214, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.applications.inception_v3.preprocess_input", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tensorflow.numpy_function", "line_number": 220, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 222, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 222, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 223, "usage_type": "attribute"}, {"api_name": "tensorflow.numpy_function", "line_number": 227, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 229, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 229, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 230, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 238, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 244, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 259, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 262, "usage_type": "call"}, {"api_name": "image2seq.get_model_name", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.GradientTape", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.debugging.assert_equal", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 279, "usage_type": "attribute"}, {"api_name": "image2seq.get_no_lstm_variables", "line_number": 281, "usage_type": "call"}, {"api_name": "image2seq.get_no_lstm_variables", "line_number": 285, "usage_type": "call"}, {"api_name": "image2seq.get_no_lstm_variables", "line_number": 289, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 309, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 326, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 341, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 343, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 348, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 352, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 367, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 369, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 371, "usage_type": "call"}]} +{"seq_id": "346556200", "text": "import pygame\nimport Level as lvl\nimport Player as pl\nimport Menu\nimport Pause\nfrom Animation import *\n\n# Colors\nGREEN = (0, 255, 0)\nRED = 
(255, 0, 0)\nPINK = (255, 192, 203)\nGRAY = (128, 128, 128)\nYELLOW = (255, 255, 0)\n\n# Screen dimensions\nSCREEN_WIDTH = 1280\nSCREEN_HEIGHT = 720\n\n\ndef run():\n \"\"\" Main Program \"\"\"\n pygame.init()\n\n # Set the height and width of the screen\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n # Create the player\n player = pl.Player()\n\n # All clones\n clones = []\n\n # Create all the levels\n level_list = []\n level_list.append(lvl.Level01(player))\n level_list.append(lvl.Level02(player))\n level_list.append(lvl.Level03(player))\n\n # Set the current level\n current_level_number = 0\n current_level = level_list[current_level_number]\n current_level.__init__(player)\n\n active_sprite_list = pygame.sprite.Group()\n player.level = current_level\n active_sprite_list.add(player)\n\n # Loop until the user clicks the close button.\n done = False\n\n # Used to manage how fast the screen updates\n clock = pygame.time.Clock()\n\n\n joysticks = []\n for i in range(0, pygame.joystick.get_count()):\n joysticks.append(pygame.joystick.Joystick(i))\n joysticks[i].init()\n print(\"Detected joystick '\", joysticks[i].get_name(), \"'\")\n\n # -------- Main Program Loop -----------\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n if (event.type == pygame.JOYHATMOTION and joysticks[0].get_hat(0) == (-1, 0)) or (event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT):\n player.go_left()\n if (event.type == pygame.JOYHATMOTION and joysticks[0].get_hat(0) == (1, 0)) or (event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT):\n player.go_right()\n if (event.type == pygame.JOYBUTTONDOWN and joysticks[0].get_button(0)) or (event.type == pygame.JOYHATMOTION and joysticks[0].get_hat(0) == (0, 1)) or (event.type == pygame.KEYDOWN and event.key == pygame.K_UP):\n player.jump()\n if (event.type == pygame.JOYBUTTONDOWN and (joysticks[0].get_button(1) or joysticks[0].get_button(2))) or (event.type == pygame.KEYDOWN and event.key == pygame.K_r):\n right = True\n if player.current_anim != player.right_anim:\n right = False\n clones.append([player.width(), player.height(), -current_level.world_shift + player.rect.x, player.rect.y, right, player.reverse_gravity])\n player.reverse_gravity = False\n player.set_animation_gravity()\n current_level.reset(clones)\n if (event.type == pygame.JOYBUTTONDOWN and joysticks[0].get_button(6)) or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n player.reverse_gravity = False\n player.set_animation_gravity()\n clones.clear()\n current_level.__init__(player)\n if (event.type == pygame.JOYBUTTONDOWN and joysticks[0].get_button(3)) or (event.type == pygame.KEYDOWN and event.key == pygame.K_2):\n if player.reverse_gravity:\n player.reverse_gravity = False\n player.set_animation_gravity()\n else:\n player.reverse_gravity = True\n player.set_animation_gravity()\n\n if (event.type == pygame.JOYHATMOTION and joysticks[0].get_hat(0) == (0, 0)) or (event.type == pygame.KEYUP and event.key == pygame.K_LEFT and player.change_x < 0):\n player.stop()\n if (event.type == pygame.JOYHATMOTION and joysticks[0].get_hat(0) == (0, 0)) or (event.type == pygame.KEYUP and event.key == pygame.K_RIGHT and player.change_x > 0):\n player.stop()\n if (event.type == pygame.JOYBUTTONDOWN and joysticks[0].get_button(7)) or (event.type == pygame.KEYUP and event.key == pygame.K_p):\n player.stop()\n done = Pause.run()\n if done:\n continue\n\n # Update the player.\n active_sprite_list.update()\n\n spikes = 
pygame.sprite.spritecollide(player, current_level.spike_list, False)\n if len(spikes) > 0:\n player.reverse_gravity = False\n player.set_animation_gravity()\n clones.clear()\n current_level.__init__(player)\n\n goals = pygame.sprite.spritecollide(player, current_level.goal_list, False)\n if len(goals) > 0:\n player.reverse_gravity = False\n player.set_animation_gravity()\n clones.clear()\n current_level_number += 1\n if current_level_number >= len(level_list):\n done = True\n continue\n current_level = level_list[current_level_number]\n current_level.__init__(player)\n player.level = current_level\n continue\n\n # Update items in the level\n current_level.update()\n\n left_scroll = SCREEN_WIDTH * .4\n right_scroll = SCREEN_WIDTH * .6\n # If the player gets near the right side, shift the world left (-x)\n if player.rect.right >= right_scroll:\n diff = player.rect.right - right_scroll\n player.rect.right = right_scroll\n current_level.shift_world(-diff)\n\n # If the player gets near the left side, shift the world right (+x)\n if player.rect.left <= left_scroll:\n diff = left_scroll - player.rect.left\n player.rect.left = left_scroll\n current_level.shift_world(diff)\n\n # ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT\n current_level.draw(screen)\n active_sprite_list.draw(screen)\n\n # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT\n\n # Limit to 60 frames per second\n clock.tick(60)\n\n # Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n Menu.main(pygame.mixer.music.get_volume())\n", "sub_path": "Game.py", "file_name": "Game.py", "file_ext": "py", "file_size_in_byte": 6097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pygame.init", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Player.Player", "line_number": 28, "usage_type": "call"}, {"api_name": "Level.Level01", "line_number": 35, "usage_type": "call"}, {"api_name": "Level.Level02", "line_number": 36, "usage_type": "call"}, {"api_name": "Level.Level03", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.joystick.get_count", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.joystick", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.joystick.Joystick", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.joystick", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.JOYHATMOTION", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.JOYHATMOTION", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", 
"line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.JOYBUTTONDOWN", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.JOYHATMOTION", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.JOYBUTTONDOWN", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.K_r", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.JOYBUTTONDOWN", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.JOYBUTTONDOWN", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.K_2", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.JOYHATMOTION", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.JOYHATMOTION", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.JOYBUTTONDOWN", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.K_p", "line_number": 98, "usage_type": "attribute"}, {"api_name": "Pause.run", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 155, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 155, "usage_type": "attribute"}, {"api_name": "Menu.main", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.mixer.music.get_volume", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 157, "usage_type": "attribute"}]} +{"seq_id": "295999484", "text": "from reportlab.lib import colors\r\nfrom reportlab.platypus import Table, TableStyle\r\n\r\nfrom directory.settings import PAGE_FRAME_WIDTH, PAGE_FRAME_HEIGHT\r\n\r\ntitle_background = colors.Color(0.1, 0.5, 0.6)\r\ntitle_text_color = colors.white\r\nTITLE_SECTION_ROW_HEIGHT = PAGE_FRAME_HEIGHT * 0.95\r\n\r\ndef title_section(title_text):\r\n text = f'''{title_text}'''\r\n heading_title_table = Table([[text.upper(),]], colWidths=[PAGE_FRAME_WIDTH], rowHeights=[TITLE_SECTION_ROW_HEIGHT])\r\n heading_title_table.setStyle(\r\n TableStyle([\r\n ('BACKGROUND', (0, 0), (-1, -1), title_background),\r\n (\"TEXTCOLOR\", (0, 0), (-1, -1), title_text_color),\r\n (\"TOPPADDING\", (0, 0), (-1, -1), 0),\r\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 0),\r\n (\"LEFTPADDING\", (0, 0), (-1, -1), 7),\r\n (\"RIGHTPADDING\", (0, 0), (-1, -1), 0),\r\n (\"ALIGN\", (0, 0), (-1, -1), \"CENTER\"),\r\n (\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\"),\r\n ])\r\n 
)\r\n return heading_title_table", "sub_path": "directory/pages/anniversaries/elements.py", "file_name": "elements.py", "file_ext": "py", "file_size_in_byte": 1023, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "reportlab.lib.colors.Color", "line_number": 6, "usage_type": "call"}, {"api_name": "reportlab.lib.colors", "line_number": 6, "usage_type": "name"}, {"api_name": "reportlab.lib.colors.white", "line_number": 7, "usage_type": "attribute"}, {"api_name": "reportlab.lib.colors", "line_number": 7, "usage_type": "name"}, {"api_name": "directory.settings.PAGE_FRAME_HEIGHT", "line_number": 8, "usage_type": "name"}, {"api_name": "reportlab.platypus.Table", "line_number": 12, "usage_type": "call"}, {"api_name": "directory.settings.PAGE_FRAME_WIDTH", "line_number": 12, "usage_type": "name"}, {"api_name": "reportlab.platypus.TableStyle", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "25698062", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom items import LagouItemLoader, LagouJobItem\nfrom utils.common import get_md5\nfrom datetime import datetime\n\nclass LagouSpider(CrawlSpider):\n name = 'lagou'\n allowed_domains = ['www.lagou.com']\n start_urls = ['https://www.lagou.com/']\n\n rules = (\n Rule(LinkExtractor(allow=r'zhaopin/.*'), follow=True),\n Rule(LinkExtractor(allow=r'gongsi/j\\d+.html'), follow=True),\n Rule(LinkExtractor(allow=r'jobs/\\d+.html'), callback='parse_job', follow=True),\n )\n headers = {\n \"HOST\": \"www.lagou.com\",\n \"Referer\": \"https://www.lagou.com/\",\n }\n\n\n\n def parse_job(self, response):\n # 解析拉勾网的职位\n\n item_loader = LagouItemLoader(item=LagouJobItem(), response=response)\n\n item_loader.add_css(\"title\", \".job-name::attr(title)\")\n item_loader.add_value(\"url\", response.url)\n item_loader.add_value(\"url_object_id\", get_md5(response.url))\n item_loader.add_css(\"salary\", \".job_request .salary::text\")\n item_loader.add_css(\"job_city\", \".job_request span:nth-child(2)::text\")\n item_loader.add_css(\"work_years\", \".job_request span:nth-child(3)::text\")\n item_loader.add_css(\"degree_need\", \".job_request span:nth-child(4)::text\")\n item_loader.add_css(\"job_type\", \".job_request span:nth-child(5)::text\")\n\n item_loader.add_css(\"publish_time\", \".publish_time::text\")\n\n item_loader.add_css(\"tags\", \".position-label li::text\")\n\n item_loader.add_css(\"job_advantage\", \".job-advantage p::text\")\n item_loader.add_css(\"job_desc\", \".job_bt p::text\")\n # 这里直接提取HTML\n item_loader.add_css(\"job_address\", \".work_addr\")\n item_loader.add_css(\"company_name\", \"#job_company dt a img::attr(alt)\")\n item_loader.add_css(\"company_url\", \"#job_company dt a::attr(href)\")\n\n item_loader.add_value(\"crawl_time\", datetime.now())\n\n item_loader.add_value(\"crawl_updatetime\", datetime.now())\n\n\n job_item = item_loader.load_item()\n\n return job_item\n\n # i = {}\n # #i['domain_id'] = response.xpath('//input[@id=\"sid\"]/@value').extract()\n # #i['name'] = response.xpath('//div[@id=\"name\"]').extract()\n # #i['description'] = response.xpath('//div[@id=\"description\"]').extract()\n # return i\n # def process_results(self, response, results):\n # return results\n #\n # def parse_start_url(self, response):\n # return []\n", "sub_path": "ArticleSpider/spiders/lagou.py", "file_name": "lagou.py", "file_ext": "py", "file_size_in_byte": 2574, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "scrapy.spiders.CrawlSpider", "line_number": 9, "usage_type": "name"}, {"api_name": "scrapy.spiders.Rule", "line_number": 15, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 15, "usage_type": "call"}, {"api_name": "scrapy.spiders.Rule", "line_number": 16, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 16, "usage_type": "call"}, {"api_name": "scrapy.spiders.Rule", "line_number": 17, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 17, "usage_type": "call"}, {"api_name": "items.LagouItemLoader", "line_number": 29, "usage_type": "call"}, {"api_name": "items.LagouJobItem", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.common.get_md5", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "139075526", "text": "import streamlit as st\n\nfrom datetime import timedelta, datetime, date, time\nfrom helpers import *\nimport pandas as pd\n\nst.sidebar.title(\"Assetto Corsa Competizione companion\")\n\ntools = [\"Fuel calculator\", \"Endurance Stint Planner\"]\n\ntool = st.sidebar.selectbox(\"Tools\", tools)\n\nif tool == tools[0]:\n race_length = st.number_input(f\"Race length in minutes\", value=0, format=\"%d\")\n fuel_consumption = st.number_input(\"Fuel consumption in liters\", step=0.1)\n pace = st.number_input(f\"Average pace in minutes\", step=0.1)\n\n if race_length != 0 and fuel_consumption != 0 and pace != 0:\n race_length_in_seconds = race_length * 60\n pace_in_seconds = decrease_time_unit(pace)\n fuel_with_formation_lap = compute_fuel_to_add(\n race_length_in_seconds, pace_in_seconds, fuel_consumption\n )\n fuel_without_formation_lap = compute_fuel_to_add(\n race_length_in_seconds, pace_in_seconds, fuel_consumption, False\n )\n st.header(\n f\"You should add {fuel_with_formation_lap}L with a formation lap \\\n or {fuel_without_formation_lap}L without\"\n )\n st.write(\n \"\"\"\n The number of lap is estimated as the length of the stint divided by the average pace, rounded up.\n The fuel to add is the fuel consumption per lap times the number of expected laps + 3% of uncertainty, rounded up.\n An extra lap is added in case of formation lap.\n \"\"\"\n )\n\nif tool == tools[1]:\n col1, col2, col3 = st.beta_columns(3)\n with col1:\n start_of_race = st.time_input(f\"Start of race\")\n with col2:\n race_length = st.number_input(f\"Length in hours\", value=0, format=\"%d\", step=1)\n with col3:\n stint_length = st.number_input(\n f\"Stint length in minutes\", value=0, format=\"%d\", step=1\n )\n col1, col2 = st.beta_columns(2)\n with col1:\n fuel_consumption = st.number_input(\"Fuel in liters\", step=0.1)\n with col2:\n pit_stop_time_lost = st.number_input(\n \"Pit Stop time in seconds\", value=50, format=\"%d\", step=1\n )\n\n number_drivers = st.number_input(\"Number of drivers\", value=0, format=\"%d\", step=1)\n drivers = {}\n cols = st.beta_columns(max(number_drivers, 1))\n for i in range(number_drivers):\n with cols[i]:\n name = st.text_input(f\"Name of driver {i + 1}\")\n pace = st.number_input(\n f\"Average pace of driver {i + 1} in 
minutes\", step=0.1\n )\n drivers[i] = {\"name\": name, \"pace\": decrease_time_unit(pace)}\n drivers_sorted = list(sorted(drivers.keys(), key=lambda x: drivers[x][\"pace\"]))\n\n if race_length != 0 and fuel_consumption != 0 and pace != 0 and stint_length != 0:\n race_length_in_seconds = race_length * 3600\n max_stint_length_in_seconds = decrease_time_unit(stint_length)\n names = []\n fuels = []\n remaining_times = []\n laps = []\n stint_number = 0\n remaining_time = race_length_in_seconds\n while remaining_time > 0:\n driver = drivers_sorted[stint_number % len(drivers_sorted)]\n names.append(drivers[driver][\"name\"])\n race_h, race_m, race_s = second_to_hour_minute_second(\n race_length_in_seconds\n )\n stint_length_in_seconds = (\n min(\n max_stint_length_in_seconds - pit_stop_time_lost,\n (((120 - fuel_consumption) // fuel_consumption) - 1)\n * drivers[driver][\"pace\"]\n - pit_stop_time_lost,\n )\n if stint_number == 0\n else min(\n max_stint_length_in_seconds - pit_stop_time_lost,\n ((120 // fuel_consumption) - 1) * drivers[driver][\"pace\"]\n - pit_stop_time_lost,\n )\n )\n\n remaining_time = (\n race_length_in_seconds\n if stint_number == 0\n else remaining_times[stint_number - 1] - stint_length_in_seconds\n )\n remaining_times.append(max(remaining_time, 0))\n\n fuels.append(\n compute_fuel_to_add(\n stint_length_in_seconds - pit_stop_time_lost,\n drivers[driver][\"pace\"],\n fuel_consumption,\n formation_lap=True,\n )\n if stint_number == 0\n else compute_fuel_to_add(\n min(\n stint_length_in_seconds - pit_stop_time_lost,\n remaining_times[stint_number - 1],\n ),\n drivers[driver][\"pace\"],\n fuel_consumption,\n formation_lap=False,\n )\n )\n laps.append(\n 0\n if stint_number == 0\n else laps[stint_number - 1]\n + compute_expected_laps(\n stint_length_in_seconds, drivers[driver][\"pace\"]\n )\n )\n stint_number += 1\n\n stints = pd.DataFrame(\n {\n \"Name\": names,\n \"Remaining Time\": list(\n map(second_to_hour_minute_second_string, remaining_times)\n ),\n \"Laps\": laps,\n \"Fuel\": fuels,\n }\n )\n st.table(stints)\n\n st.write(\n \"\"\"\n The number of lap is estimated as the length of the stint divided by the average pace, rounded up.\n The fuel to add is the fuel consumption per lap times the number of expected laps + 3% of uncertainty, rounded up.\n An extra lap is added in case of formation lap.\n \"\"\"\n )\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "streamlit.sidebar.title", "line_number": 7, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 7, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 11, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 11, "usage_type": "attribute"}, {"api_name": "streamlit.number_input", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 31, "usage_type": "call"}, {"api_name": "streamlit.beta_columns", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.time_input", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 44, "usage_type": "call"}, {"api_name": 
"streamlit.number_input", "line_number": 46, "usage_type": "call"}, {"api_name": "streamlit.beta_columns", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 57, "usage_type": "call"}, {"api_name": "streamlit.beta_columns", "line_number": 59, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 144, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "563057152", "text": "import importlib\nfrom collections import namedtuple\nfrom functools import wraps\n\nfrom astroid import inference_tip\nfrom astroid.nodes import (\n Call,\n ImportFrom,\n Import,\n)\nfrom astroid.node_classes import NodeNG\nfrom astroid.exceptions import InferenceError\nfrom astroid.transforms import TransformVisitor\n\n\nInference = namedtuple('Inference', ('node', 'fn', 'predicate'))\nCodewatchNodeAnnotations = namedtuple(\n \"CodewatchNodeAnnotations\", [\"stats\", \"rel_file_path\"]\n)\n\n\nclass NodeVisitor(TransformVisitor):\n def __init__(self, stats, rel_file_path):\n self.stats = stats\n self.rel_file_path = rel_file_path\n super(NodeVisitor, self).__init__()\n\n def _add_codewatch_annotations(self, node):\n node._codewatch = CodewatchNodeAnnotations(\n self.stats, self.rel_file_path\n )\n return node\n\n def _transform(self, node):\n cls = node.__class__\n if cls not in self.transforms:\n # no transform registered for this class of node\n return node\n annotated_node = self._add_codewatch_annotations(node)\n return super(NodeVisitor, self)._transform(annotated_node)\n\n\ndef _astroid_interface_for_visitor(visitor_function):\n \"\"\"Turn codewatch visitors into astroid-compatible transform functions\n\n codewatch visitors can make use of 3 args, the node, stats, and the\n relative file path you were visited for\n\n astroid transforms must take only the node\n\n By annotating the node with stats and relative file path, we can make our\n codewatch visitors compatible with astroid transform functions.\n \"\"\"\n\n @wraps(visitor_function)\n def call_visitor(annotated_node, *args, **kwargs):\n return visitor_function(\n annotated_node,\n annotated_node._codewatch.stats,\n annotated_node._codewatch.rel_file_path,\n *args,\n **kwargs\n )\n\n return call_visitor\n\n\ndef inference(node, fn, predicate=None):\n return Inference(node, fn, predicate)\n\n\ndef visit(node_type, predicate=None, inferences=None):\n def decorator(fn):\n NodeVisitorMaster.register_visitor(\n node_type, fn, predicate, inferences,\n )\n return fn\n\n return decorator\n\n\ndef count_import_usages(stats_namespace, expected_qname, importer=None):\n if importer is None:\n importer = importlib.import_module\n\n module_name = '.'.join(expected_qname.split('.')[:-1])\n trouble_attribute = expected_qname.split('.')[-1]\n\n def track_import(stats, rel_file_path):\n stats.namespaced(stats_namespace).increment(rel_file_path)\n\n def visit_import(import_node, stats, rel_file_path):\n for name, alias in import_node.names:\n if name == expected_qname:\n track_import(stats, rel_file_path)\n return import_node\n\n def visit_importfrom(import_from_node, 
stats, rel_file_path):\n modname = import_from_node.modname\n\n for name, alias in import_from_node.names:\n if name == '*':\n module = importer(module_name)\n if trouble_attribute in dir(module):\n track_import(stats, rel_file_path)\n else:\n imported_qname = modname + '.' + name\n if imported_qname == expected_qname:\n track_import(stats, rel_file_path)\n return import_from_node\n\n NodeVisitorMaster.register_visitor(Import, visit_import, None)\n NodeVisitorMaster.register_visitor(ImportFrom, visit_importfrom, None)\n\n\ndef count_calling_files(\n stats_namespace,\n expected_callable_qname,\n inferences=None,\n):\n if stats_namespace is None:\n raise Exception(\"count_calling_files() requires a valid namespace\")\n\n expected_callable_name = expected_callable_qname.split(\".\")[-1]\n\n def record_stats(stats, rel_file_path):\n stats = stats.namespaced(stats_namespace)\n stats.increment(rel_file_path)\n\n def visit_call(call_node, stats, rel_file_path):\n \"\"\"A visitor function to gather call stats.\n\n astroid.nodes.Call nodes take one of two forms:\n symbol()\n or\n some.expression.attribute()\n\n The AST is structured differently in each case; we detect and handle both.\n \"\"\"\n callable_as_attribute = hasattr(call_node.func, \"attrname\")\n if callable_as_attribute:\n callable_name = call_node.func.attrname\n else:\n if not hasattr(call_node.func, \"name\"):\n return call_node\n callable_name = call_node.func.name\n\n # Optimization: Start with a cheap guard before astroid inference\n if callable_name != expected_callable_name:\n return call_node\n\n try:\n inferred_types = call_node.func.inferred()\n except InferenceError:\n return call_node\n\n found_matching_inferred_qname = any(\n inferred_type.qname() == expected_callable_qname\n for inferred_type in inferred_types\n )\n\n if not found_matching_inferred_qname:\n return call_node\n\n record_stats(stats, rel_file_path)\n\n return call_node\n\n NodeVisitorMaster.register_visitor(Call, visit_call, inferences=inferences)\n\n\nclass NodeVisitorMaster(object):\n node_visitor_registry = []\n\n @classmethod\n def register_visitor(\n cls,\n node,\n visitor_function,\n predicate=None,\n inferences=None,\n ):\n wrapped = _astroid_interface_for_visitor(visitor_function)\n\n if not issubclass(node, NodeNG):\n raise Exception(\n \"visitor_function registered for invalid node type. \"
\"\n \"Please use a NodeNG subclass from the astroid.nodes module.\"\n )\n\n cls.node_visitor_registry.append(\n (node, wrapped, predicate, inferences)\n )\n\n @classmethod\n def _initialize_node_visitors(cls, stats, rel_file_path):\n initialized_node_visitors = []\n for (\n node,\n node_visitor_function,\n predicate,\n inferences,\n ) in cls.node_visitor_registry:\n node_visitor_obj = NodeVisitor(stats, rel_file_path)\n\n if inferences is not None:\n for inference in inferences:\n node_visitor_obj.register_transform(\n inference.node,\n inference_tip(inference.fn),\n inference.predicate,\n )\n\n node_visitor_obj.register_transform(\n node,\n node_visitor_function,\n predicate,\n )\n initialized_node_visitors.append(node_visitor_obj)\n return initialized_node_visitors\n\n @classmethod\n def visit(cls, stats, node, rel_file_path):\n node_visitors_initialized = cls._initialize_node_visitors(\n stats, rel_file_path\n )\n\n for node_visitor_initialized in node_visitors_initialized:\n node_visitor_initialized.visit(node)\n", "sub_path": "codewatch/node_visitor.py", "file_name": "node_visitor.py", "file_ext": "py", "file_size_in_byte": 7072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.namedtuple", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 17, "usage_type": "call"}, {"api_name": "astroid.transforms.TransformVisitor", "line_number": 22, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 55, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 84, "usage_type": "attribute"}, {"api_name": "astroid.nodes.Import", "line_number": 112, "usage_type": "argument"}, {"api_name": "astroid.nodes.ImportFrom", "line_number": 113, "usage_type": "argument"}, {"api_name": "astroid.exceptions.InferenceError", "line_number": 154, "usage_type": "name"}, {"api_name": "astroid.nodes.Call", "line_number": 169, "usage_type": "argument"}, {"api_name": "astroid.node_classes.NodeNG", "line_number": 185, "usage_type": "argument"}, {"api_name": "astroid.inference_tip", "line_number": 210, "usage_type": "call"}]} +{"seq_id": "62284104", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass _Loss(nn.Module):\n def __init__(self, size_average=None, reduce=None, reduction='mean'):\n super(_Loss, self).__init__()\n if size_average is not None or reduce is not None:\n self.reduction = _Reduction.legacy_get_string(size_average, reduce)\n else:\n self.reduction = reduction\n\n\nclass _WeightedLoss(_Loss):\n def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'):\n super(_WeightedLoss, self).__init__(size_average, reduce, reduction)\n self.register_buffer('weight', weight)\n\n\nclass MaksedBCELoss(_WeightedLoss):\n def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'):\n super(MaksedBCELoss, self).__init__(weight, size_average, reduce, reduction)\n\n def forward(self, input, target, mask):\n input = input.masked_select(mask)\n target = target.masked_select(mask)\n return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)\n\n\nclass MaskedBCEWithLogitsLoss(_Loss):\n def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None):\n super(MaskedBCEWithLogitsLoss, self).__init__(size_average, reduce, reduction)\n self.register_buffer('weight', weight)\n self.register_buffer('pos_weight', 
pos_weight)\n\n def forward(self, input, target, mask):\n input = input.masked_select(mask)\n target = target.masked_select(mask)\n return F.binary_cross_entropy_with_logits(input, target,\n self.weight,\n pos_weight=self.pos_weight,\n reduction=self.reduction)\n", "sub_path": "torch_chemistry/nn/metrics/masked_binary_cross_entropy.py", "file_name": "masked_binary_cross_entropy.py", "file_ext": "py", "file_size_in_byte": 1818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.functional.binary_cross_entropy_with_logits", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "250806683", "text": "from rest_framework import serializers\n\nfrom api.models import Curso, Nota, Rol, Invitation\n\nfrom .user import UserSerializer\n\nfrom django.utils import timezone\n\nclass RolSerializer(serializers.ModelSerializer):\n class Meta:\n model = Rol\n fields = '__all__'\n read_only_fields = (\n 'is_teacher',\n 'is_studen',\n )\n\nclass CursoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Curso\n fields = '__all__'\n read_only_fields = (\n \"curso\",\n )\n\nclass NotaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Nota\n fields = '__all__'\n read_only_fields = (\n \"nota\",\n )\n\nclass EstudianteSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n class Meta:\n model = Rol\n fields = (\n 'user',\n 'is_studen',\n 'is_teacher',\n 'is_active'\n )\n read_only_fields = (\n 'user',\n 'is_teacher',\n 'is_studen',\n )\n\nclass AddStudenSerializer(serializers.Serializer):\n\n invitation_code = serializers.CharField(min_length=8)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def validate_user(self, data):\n curso = self.context['curso']\n user = data\n q = Rol.objects.filter(curso=curso, user=user)\n if q.exists():\n raise serializers.ValidationError('User is already a student of this curso')\n return data\n\n def validate_invitation_code(self, data):\n \n try:\n invitation = Invitation.objects.get(\n code=data,\n curso=self.context['curso'],\n used=False\n )\n except Invitation.DoesNotExist:\n raise serializers.ValidationError('Invalid invitation code.')\n self.context['invitation'] = invitation\n return data\n \n def create(self, data):\n \"\"\"Create new curso member.\"\"\"\n curso = self.context['curso']\n invitation = self.context['invitation']\n user = data['user']\n\n now = timezone.now()\n\n # student creation\n member = Rol.objects.create(\n user=user,\n profile=user.profile,\n curso=curso,\n invited_by=invitation.issued_by\n )\n\n # Update Invitation\n invitation.used_by = user\n invitation.used = True\n invitation.used_at = now\n invitation.save()\n\n # Update issuer data\n issuer = Rol.objects.get(user=invitation.issued_by, curso=curso)\n issuer.remaining_invitations -= 1\n issuer.save()\n\n return member\n", "sub_path": "api/serializers/cursos.py", "file_name": "cursos.py", "file_ext": "py", "file_size_in_byte": 2721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name":
"rest_framework.serializers.ModelSerializer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name"}, {"api_name": "api.models.Rol", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 19, "usage_type": "name"}, {"api_name": "api.models.Curso", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 27, "usage_type": "name"}, {"api_name": "api.models.Nota", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 35, "usage_type": "name"}, {"api_name": "user.UserSerializer", "line_number": 36, "usage_type": "call"}, {"api_name": "api.models.Rol", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.serializers.Serializer", "line_number": 51, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HiddenField", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CurrentUserDefault", "line_number": 54, "usage_type": "call"}, {"api_name": "api.models.Rol.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "api.models.Rol.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "api.models.Rol", "line_number": 59, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ValidationError", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 61, "usage_type": "name"}, {"api_name": "api.models.Invitation.objects.get", "line_number": 67, "usage_type": "call"}, {"api_name": "api.models.Invitation.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "api.models.Invitation", "line_number": 67, "usage_type": "name"}, {"api_name": "api.models.Invitation.DoesNotExist", "line_number": 72, "usage_type": "attribute"}, {"api_name": "api.models.Invitation", "line_number": 72, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ValidationError", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 73, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 83, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 83, "usage_type": "name"}, {"api_name": "api.models.Rol.objects.create", "line_number": 86, "usage_type": "call"}, {"api_name": "api.models.Rol.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "api.models.Rol", "line_number": 86, "usage_type": "name"}, {"api_name": "user.profile", "line_number": 88, "usage_type": "attribute"}, {"api_name": "api.models.Rol.objects.get", "line_number": 100, "usage_type": "call"}, {"api_name": "api.models.Rol.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "api.models.Rol", "line_number": 100, 
"usage_type": "name"}]} +{"seq_id": "19739179", "text": "from django.contrib import admin\nfrom .models import Transaction\nfrom django.conf import settings\nimport stripe\n\ndef refund_transaction(modeladmin, request, queryset):\n\n stripe.api_key = settings.STRIPE_TEST_SECRET_API_KEY\n\n for item in queryset:\n\n refund = stripe.Refund.create(\n charge=item.stripe_id\n )\n\n if refund['status'] == 'succeeded':\n newStatus = 'refunded'\n else:\n newStatus = refund['status']\n\n item.status=newStatus\n item.save()\n\n\nrefund_transaction.short_description = \"Refund the selected Transactions\"\n\nclass TransactionAdmin(admin.ModelAdmin):\n list_display = ['contract', 'status']\n ordering = ['contract']\n actions = [refund_transaction]\n readonly_fields=['status']\n\nadmin.site.register(Transaction, TransactionAdmin)", "sub_path": "orderofpi/payments/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "stripe.api_key", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.conf.settings.STRIPE_TEST_SECRET_API_KEY", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 8, "usage_type": "name"}, {"api_name": "stripe.Refund.create", "line_number": 12, "usage_type": "call"}, {"api_name": "stripe.Refund", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Transaction", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "528004262", "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2014 Oliver Ainsworth\n\nfrom __future__ import (absolute_import,\n unicode_literals, print_function, division)\n\nimport re\n\nimport tornado.httputil\nimport tornado.web\n\nimport doddle.response\n\n\nclass Rule(tornado.web.URLSpec):\n\n def __init__(self, rule, view_func, methods):\n self.converters = {}\n url_spec = \"\"\n start = 0\n for char in \"\\\\.^$+?[]{}|()<>\":\n rule = rule.replace(char, \"\\\\\" + char)\n for match in BaseConverter.re_rule_variable.finditer(rule):\n type_ = BaseConverter.converters.get(match.group(\"type\"),\n StringConverter)()\n identifier = match.group(\"identifier\")\n self.converters[identifier] = type_\n url_spec += rule[start:match.start()]\n start = match.end()\n regex = type_.regex\n url_spec += \"(?P<\" + identifier + \">\" + regex + \")\"\n url_spec += rule[start:]\n kwargs = {\n \"rule\": self,\n \"view_func\": view_func,\n \"methods\": methods,\n }\n super(Rule, self).__init__(url_spec, View, kwargs, view_func.__name__)\n\n def to_python(self, identifier, value):\n return self.converters[identifier].to_python(value)\n\n\nclass BaseConverter(object):\n\n name = None\n\n class __metaclass__(type):\n\n converters = {}\n\n def __new__(meta, name, bases, attrs):\n if \"name\" not in attrs:\n raise AttributeError(\"Converter must have a 'name' attribute\")\n if attrs[\"name\"] in meta.converters:\n raise KeyError(\"Converter with name '{}' \"\n \"already exists\".format(attrs[\"name\"]))\n attrs[\"converters\"] = 
meta.converters\n cls = type.__new__(meta, name, bases, attrs)\n if attrs[\"name\"] is not None:\n meta.converters[attrs[\"name\"]] = cls\n return cls\n\n @property\n def re_rule_variable(cls):\n names = \"|\".join(cls.converters.iterkeys())\n return re.compile(r\"<(?:(?P<type>\" + names +\n \"):)?(?P<identifier>[A-Za-z_][A-Za-z0-9_]+)>\")\n\n\nclass StringConverter(BaseConverter):\n\n name = \"str\"\n regex = r\"[^/]+\"\n\n def to_python(self, value):\n return unicode(value)\n\n\nclass IntegerConverter(BaseConverter):\n\n name = \"int\"\n regex = r\"-?\\d+\"\n\n def to_python(self, value):\n return int(value)\n\n\nclass FloatConverter(BaseConverter):\n\n name = \"float\"\n regex = r\"(\\d+\\.\\d+|\\d+|\\.\\d+|\\d+\\.)\"\n\n def to_python(self, value):\n return float(value)\n\n\nclass PathConverter(BaseConverter):\n\n name = \"path\"\n regex = r\"[A-Za-z0-9\\-._~!$&'()*+,;=:@/]+\"\n\n def to_python(self, value):\n return unicode(value)\n\n\nclass View(tornado.web.RequestHandler):\n\n def initialize(self, rule, view_func, methods):\n self.rule = rule\n self.view_func = view_func\n self.methods = [method.upper() for method in methods]\n for method in methods:\n if method not in self.SUPPORTED_METHODS:\n raise ValueError(\"Unknown HTTP method '{}'\".format(method))\n\n def make_response(self, view_response):\n response = doddle.response.Response(\"\", 200,\n {\"content-type\": \"text/html\"})\n if isinstance(view_response, doddle.response.Response):\n return view_response\n if isinstance(view_response, bytes):\n response.content = view_response\n elif isinstance(view_response, tuple):\n content, status_or_headers, headers = \\\n view_response + (None,) * (3 - len(view_response))\n if content is None:\n raise TypeError(\"View function returned None\")\n if isinstance(status_or_headers,\n (dict, tornado.httputil.HTTPHeaders)):\n headers = status_or_headers\n status_or_headers = None\n if status_or_headers is not None:\n # TODO: differentiate between status codes and reasons\n response.status_code = status_or_headers\n if headers:\n response.headers.update(headers)\n response.content = content\n elif hasattr(view_response, \"__call__\"):\n # TODO: WSGI application delegate\n pass\n elif view_response is None:\n raise TypeError(\"View function returned None\")\n else:\n # Slight deviation from Flask here.
Flask will default to\n # treating it as a WSGI application but it seems far more\n # reasonable to try to coerce the response into a string.\n response.content = unicode(view_response)\n return response\n\n def handle(self, **kwargs):\n for identifier in kwargs:\n kwargs[identifier] = self.rule.to_python(identifier,\n kwargs[identifier])\n if self.request.method not in self.methods:\n response = doddle.response.Response(\"405 Not Supported\", 405)\n else:\n response = self.make_response(self.view_func(**kwargs))\n self.set_status(response.status_code)\n for header, value in response.headers.iteritems():\n self.set_header(header, value)\n if response.status_code != 204:\n self.write(response.content)\n self.finish()\n\n def options(self, **kwargs):\n self.handle(**kwargs)\n\n def get(self, **kwargs):\n self.handle(**kwargs)\n\n def head(self, **kwargs):\n self.handle(**kwargs)\n\n def post(self, **kwargs):\n self.handle(**kwargs)\n\n def put(self, **kwargs):\n self.handle(**kwargs)\n\n def delete(self, **kwargs):\n self.handle(**kwargs)\n\n def patch(self, **kwargs):\n self.handle(**kwargs)\n", "sub_path": "doddle/view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 5945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tornado.httputil.web", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tornado.httputil", "line_number": 15, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 67, "usage_type": "call"}, {"api_name": "tornado.httputil.web", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tornado.httputil", "line_number": 107, "usage_type": "name"}, {"api_name": "doddle.response.response.Response", "line_number": 118, "usage_type": "call"}, {"api_name": "doddle.response.response", "line_number": 118, "usage_type": "attribute"}, {"api_name": "doddle.response", "line_number": 118, "usage_type": "name"}, {"api_name": "doddle.response.response", "line_number": 120, "usage_type": "attribute"}, {"api_name": "doddle.response", "line_number": 120, "usage_type": "name"}, {"api_name": "tornado.httputil.httputil", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tornado.httputil", "line_number": 130, "usage_type": "name"}, {"api_name": "doddle.response.response.Response", "line_number": 157, "usage_type": "call"}, {"api_name": "doddle.response.response", "line_number": 157, "usage_type": "attribute"}, {"api_name": "doddle.response", "line_number": 157, "usage_type": "name"}]} +{"seq_id": "612568980", "text": "\"\"\"Build script runner.
To build and test, run inv or invoke.\"\"\"\n# -*- coding: utf-8 -*-\nfrom time import sleep\nfrom invoke import task, run\nfrom os import environ\nfrom engine.constants import Constants\n\nMIN_COVERAGE = 60.0\nSLEEP_TIME_SECONDS = 5\nDEFAULT_TIMEOUT = 10\nBEGIN = 0\nBAD_STATUS = 1\nOK_STATUS = 0\nDB_URL = 'http://localhost:7474/db/data/'\nDOCKER_STOP = 'docker stop {0}'\nDOCKER_RM = 'docker rm {0}'\nDOCKER_PS = 'docker ps | grep {0}'\nDOCKER_NEO = 'docker run -d --name=neo --publish=7474:7474 ' \\\n '-e NEO4J_AUTH=none --publish=7687:7687 ' \\\n '--volume=$HOME/neo4j/data:/data ' \\\n '--volume=$HOME/neo4j/logs:/logs neo4j:3.1'\nSTOP_NEO = DOCKER_STOP.format('neo')\nRM_NEO = DOCKER_RM.format('neo')\nCHECK_NEO = DOCKER_PS.format('neo')\nDOCKER_ES = 'docker run -d -p 9200:9200 -p 9300:9300 --name es elasticsearch'\nSTOP_ES = DOCKER_STOP.format('es')\nRM_ES = DOCKER_RM.format('es')\nCHECK_ES = DOCKER_PS.format('es')\nTEST_COMMAND = 'nosetests --with-coverage --cover-erase --cover-package=engine'\nLINT_COMMAND = 'flake8 --ignore=D204 *.py'\nPIP_COMMAND = 'pip install -r requirements.txt'\nQUIET = environ.get('QUIET', 'true').lower() == 'true'\n\n\ndef check_url_up(url, timeout=DEFAULT_TIMEOUT, status_expected=OK_STATUS):\n \"\"\"Check a URL until it returns the expected status or the timeout elapses.\"\"\"\n try:\n status = BAD_STATUS\n max_loop_count = int(timeout / SLEEP_TIME_SECONDS)\n loop_count = BEGIN\n while not status == status_expected and loop_count < max_loop_count:\n result = run('curl --output /dev/null '\n '--silent --head --fail ' + url)\n status = result.return_code\n if status == status_expected:\n return True\n sleep(SLEEP_TIME_SECONDS)\n return False\n except (Exception, ValueError):\n return False\n\n\n@task\ndef start_search(ctx):\n \"\"\"Start elasticsearch in a container.\"\"\"\n try:\n stop_search(ctx)\n ctx.run(DOCKER_ES, hide=QUIET)\n except (Exception, ValueError):\n pass\n\n\n@task\ndef start_db(ctx):\n \"\"\"Start graph db in a container.\"\"\"\n try:\n stop_db(ctx)\n ctx.run(DOCKER_NEO, hide=QUIET)\n except (Exception, ValueError):\n pass\n\n\n@task\ndef db(ctx):\n \"\"\"DB start/stop switch task.\"\"\"\n if get_db_running():\n stop_db(ctx)\n else:\n start_db(ctx)\n\n\n@task\ndef get_search_running(ctx):\n \"\"\"See if search service is running.\"\"\"\n try:\n ctx.run(CHECK_ES, hide=QUIET)\n return True\n except (Exception, ValueError):\n return False\n\n\n@task\ndef stop_search(ctx):\n \"\"\"Stop elasticsearch in a container.\"\"\"\n try:\n ctx.run(STOP_ES, hide=QUIET)\n ctx.run(RM_ES, hide=QUIET)\n except (Exception, ValueError):\n pass\n\n\n@task\ndef stop_db(ctx):\n \"\"\"Stop graph db in a container.\"\"\"\n try:\n ctx.run(STOP_NEO, hide=QUIET)\n ctx.run(RM_NEO, hide=QUIET)\n except (Exception, ValueError):\n pass\n\n\n@task\ndef test(ctx):\n \"\"\"Run tests.\"\"\"\n print('Running tests...')\n ctx.run(TEST_COMMAND)\n\n\n@task\ndef lint(ctx):\n \"\"\"Run lint checking on code.\"\"\"\n print('Running lint check...')\n ctx.run(LINT_COMMAND)\n\n\n@task\ndef pip(ctx):\n \"\"\"Install pip dependencies.\"\"\"\n print('Installing requirements...')\n ctx.run(PIP_COMMAND)\n\n\n@task(default=True)\ndef build(ctx):\n \"\"\"Perform the full build process.\"\"\"\n lint(ctx)\n test(ctx)\n\n\n@task\ndef package(ctx):\n \"\"\"Perform the full build and package process.\"\"\"\n lint(ctx)\n test(ctx)\n docker(ctx)\n\n\n@task\ndef docker(ctx, hide='both'):\n \"\"\"Build docker image for containerizing the application.\"\"\"\n print('Building docker image...')\n
ctx.run('docker build -t {0} .'.format(Constants.PRODUCT_NAME), hide=hide)\n stop_running_container(ctx, hide)\n remove_existing_container(ctx, hide)\n print('Starting docker container for {0}...'.format(\n Constants.PRODUCT_NAME))\n ctx.run('docker run -d -p 8000:8000 --name {0} {0}'.\n format(Constants.PRODUCT_NAME), hide=hide)\n\n\ndef get_db_running():\n \"\"\"See if graph db service is running.\"\"\"\n try:\n run(CHECK_NEO, hide=QUIET)\n result = check_url_up(DB_URL)\n return result\n except (Exception, ValueError):\n return False\n\n\ndef remove_existing_container(ctx, hide='both'):\n \"\"\"Remove the existing container if it exists.\"\"\"\n try:\n ctx.run(DOCKER_RM.format(Constants.PRODUCT_NAME), hide=hide)\n except (Exception, ValueError):\n pass\n\n\ndef stop_running_container(ctx, hide):\n \"\"\"Stop the running container if it is running.\"\"\"\n try:\n ctx.run(DOCKER_STOP.format(Constants.PRODUCT_NAME), hide=hide)\n except (Exception, ValueError):\n pass\n", "sub_path": "tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 4782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.environ.get", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "name"}, {"api_name": "invoke.run", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 53, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 64, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 74, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 83, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 93, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 103, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 113, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 121, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 128, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 135, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 142, "usage_type": "name"}, {"api_name": "engine.constants.Constants.PRODUCT_NAME", "line_number": 154, "usage_type": "attribute"}, {"api_name": "engine.constants.Constants", "line_number": 154, "usage_type": "name"}, {"api_name": "engine.constants.Constants.PRODUCT_NAME", "line_number": 158, "usage_type": "attribute"}, {"api_name": "engine.constants.Constants", "line_number": 158, "usage_type": "name"}, {"api_name": "engine.constants.Constants.PRODUCT_NAME", "line_number": 160, "usage_type": "attribute"}, {"api_name": "engine.constants.Constants", "line_number": 160, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 150, "usage_type": "name"}, {"api_name": "invoke.run", "line_number": 166, "usage_type": "call"}, {"api_name": "engine.constants.Constants.PRODUCT_NAME", "line_number": 176, "usage_type": "attribute"}, {"api_name": "engine.constants.Constants", "line_number": 176, "usage_type": "name"}, {"api_name": "engine.constants.Constants.PRODUCT_NAME", "line_number": 184, "usage_type": "attribute"}, {"api_name": "engine.constants.Constants", "line_number": 184, "usage_type": "name"}]} +{"seq_id": "431133203", "text": "# -*- coding: utf-8 -*-\n\nimport dash\n\nimport dash_core_components as dcc\n\nimport dash_daq as daq\n\nimport dash_html_components as html\n\nfrom
dash.dependencies import Input, Output\n\n \n\nimport pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n \n\ndf = pd.read_csv(\"Admission_Values.csv\")\n\nX = df[df.columns.difference(['Chance of Admit ', 'Serial No.'])]\n\nY=df['Chance of Admit ']\n\n \n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.linear_model import LinearRegression\n\n \n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n \n\nregressor = LinearRegression() \n\nregressor.fit(X_train, Y_train)\n\n \n\n \n\n \n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n \n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\nserver=app.server\n\n \n\napp.layout = html.Div([\n\n \n\n html.H1('Master Program Acceptance Predictor'),\n\n \n\n html.Div([ \n\n html.Label('GRE Score'),\n\n dcc.Slider(id='gre-slider',\n\n min=0, max=340, step=1, value=170,\n\n marks={\n\n 0: {'label': '0'},\n\n 100: {'label': '100'},\n\n 200: {'label': '200'},\n\n 300: {'label': '300'},\n\n 340: {'label': '340'} \n\n }),\n\n \n\nhtml.Br(),\n\nhtml.Label('TOEFL Score'),\n\ndcc.Slider(id='toefl-slider',\n\n min=0, max=120, step=1, value=60,\n\n marks={\n\n 0: {'label': '0'},\n\n 25: {'label': '25'},\n\n 50: {'label': '50'},\n\n 75: {'label': '75'},\n\n 100: {'label': '100'},\n\n 120: {'label': '120'} \n\n }),\n\n \n\nhtml.Br(),\n\nhtml.Label('University Rating'),\n\ndcc.Slider(id='rating-slider',\n\n min=0, max=5, step=1, value=3,\n\n marks={\n\n 0: {'label': '0'},\n\n 1: {'label': '1'},\n\n 2: {'label': '2'},\n\n 3: {'label': '3'},\n\n 4: {'label': '4'},\n\n 5: {'label': '5'},\n\n \n\n }),\n\n \n\nhtml.Br(),\n\nhtml.Label('Statement of Purpose'),\n\ndcc.Slider(id='sop-slider',\n\n min=0, max=5, step=1, value=3,\n\n marks={\n\n 0: {'label': '0'},\n\n 1: {'label': '1'},\n\n 2: {'label': '2'},\n\n 3: {'label': '3'},\n\n 4: {'label': '4'},\n\n 5: {'label': '5'},\n\n \n\n }),\n\n \n\nhtml.Br(),\n\nhtml.Label('Letter of Recommendation'),\n\ndcc.Slider(id='lor-slider',\n\n min=0, max=5, step=1, value=3,\n\n marks={\n\n 0: {'label': '0'},\n\n 1: {'label': '1'},\n\n 2: {'label': '2'},\n\n 3: {'label': '3'},\n\n 4: {'label': '4'},\n\n 5: {'label': '5'},\n\n \n\n }),\n\n \n\nhtml.Br(),\n\nhtml.Label('College GPA'),\n\ndcc.Slider(id='gpa-slider',\n\n min=0, max=10, step=1, value=5,\n\n marks={\n\n 0: {'label': '0'},\n\n 2: {'label': '2'},\n\n 4: {'label': '4'},\n\n 6: {'label': '6'},\n\n 8: {'label': '8'},\n\n 10: {'label': '10'},\n\n \n\n }),\n\n \n\nhtml.Br(),\n\nhtml.Label('Research Experience'),\n\ndcc.Slider(id='research-slider',\n\n min=0, max=1, step=1, value=0,\n\n marks={\n\n 0: {'label': '0'},\n\n 1: {'label': '1'},\n\n \n\n }),\n\n],className=\"pretty_container four columns\"),\n\n \n\n html.Div([\n\n \n\n daq.Gauge(\n\n id='my-gauge',\n\n showCurrentValue=True,\n\n color={\"gradient\":True,\"ranges\":{\"red\":[0,0.4],\"yellow\":[0.4,0.7],\"green\":[0.7,1]}},\n\n label=\"Probability\",\n\n max=1,\n\n min=0,\n\n value=1\n\n ),\n\n])\n\n ])\n\n \n\n \n\n@app.callback(\n\n Output('my-gauge', 'value'),\n\n [Input('gre-slider', 'value'),\n\n Input('toefl-slider', 'value'),\n\n Input('rating-slider', 'value'),\n\n Input('sop-slider', 'value'),\n\n Input('lor-slider', 'value'),\n\n Input('gpa-slider', 'value'),\n\n Input('research-slider', 'value')\n\n ])\n\ndef update_output_div(gre,\n\n toefl,\n\n rating,\n\n sop,\n\n lor,\n\n gpa,\n\n research):\n\n X_case =pd.DataFrame({'CGPA':[gpa],'GRE 
Score':[gre],'LOR':[lor],'Research':[research],'SOP':[sop],'TOEFL Score':[toefl],'University Rating':[rating]})\n\n Y_case = regressor.predict(X_case)\n\n \n\n return Y_case[0]\n\n \n\n \n\nif __name__ == '__main__':\n\n app.run_server()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 42, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 56, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 62, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 66, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 70, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 72, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 74, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 94, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 96, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 98, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 120, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 122, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 124, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 150, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 152, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 176, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 178, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 180, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 204, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 206, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 208, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 232, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 234, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 236, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 254, "usage_type": "call"}, {"api_name": "dash_daq.Gauge", "line_number": 258, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 318, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 286, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 288, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 290, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 292, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 294, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 296, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 298, "usage_type": "call"}, 
{"api_name": "dash.dependencies.Input", "line_number": 300, "usage_type": "call"}]} +{"seq_id": "601041838", "text": "import http.client as http_status_codes\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom flask_restful import Resource, request\nfrom gcg.api.controllers.controller_gcg import controller_gcg_v2, ControllerResult\nfrom gcg.exceptions import GCGValidationError\nfrom gcg.schemas import schema_request\nfrom gcg.utils import make_json_response, make_text_response\n\n\nclass GCGResource(Resource):\n\n @staticmethod\n def post():\n request_schema = schema_request.GCGAPIOpts()\n\n # Extract Header Info\n AWS_ACCESS_KEY = request.headers['X-Api-Key']\n AWS_SECRET_KEY = request.headers['X-Api-Secret']\n USER_AGENT = request.headers['User-Agent']\n\n # Extracts URL params\n url_params = request.args\n\n return_type = url_params.get(\"return_type\", \"json\")\n store_aws = bool(strtobool(url_params.get(\"store_aws\", 'false')))\n template_type = url_params.get(\"template_type\")\n name = url_params.get(\"name\", f'GCG_API_{str(datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\"))}')\n lab_name = url_params.get(\"lab_name\", f'GCG_API_{str(datetime.now().strftime(\"%m_%d_%Y\"))}')\n\n # Extracts JSON Body\n\n json_data = request.json\n\n # Generate Config\n try:\n result = controller_gcg_v2(\n data=json_data,\n store_aws=store_aws,\n store_local=False,\n template_type=template_type,\n lab_name=lab_name,\n name=name,\n )\n\n if isinstance(result, ControllerResult):\n if return_type == \"text\":\n return make_text_response(\n data=result.data,\n msg=\"Success\",\n status_code=http_status_codes.OK\n )\n else:\n return make_json_response(\n data=result.data,\n msg=\"Success\",\n status_code=http_status_codes.OK\n )\n else:\n return\n\n except GCGValidationError as err:\n return make_json_response(\n data={},\n msg=f\"Validation Error: {str(err)}\",\n status_code=http_status_codes.CONFLICT\n )\n\n\n", "sub_path": "gcg/api/resources/resource_gcg.py", "file_name": "resource_gcg.py", "file_ext": "py", "file_size_in_byte": 2324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask_restful.Resource", "line_number": 11, "usage_type": "name"}, {"api_name": "gcg.schemas.schema_request.GCGAPIOpts", "line_number": 15, "usage_type": "call"}, {"api_name": "gcg.schemas.schema_request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask_restful.request.headers", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask_restful.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask_restful.request.headers", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask_restful.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_restful.request.headers", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask_restful.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask_restful.request.args", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask_restful.request", "line_number": 23, "usage_type": "name"}, {"api_name": "distutils.util.strtobool", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": 
"flask_restful.request.json", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask_restful.request", "line_number": 33, "usage_type": "name"}, {"api_name": "gcg.api.controllers.controller_gcg.controller_gcg_v2", "line_number": 37, "usage_type": "call"}, {"api_name": "gcg.api.controllers.controller_gcg.ControllerResult", "line_number": 46, "usage_type": "argument"}, {"api_name": "gcg.utils.make_text_response", "line_number": 48, "usage_type": "call"}, {"api_name": "http.client.OK", "line_number": 51, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 51, "usage_type": "name"}, {"api_name": "gcg.utils.make_json_response", "line_number": 54, "usage_type": "call"}, {"api_name": "http.client.OK", "line_number": 57, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 57, "usage_type": "name"}, {"api_name": "gcg.exceptions.GCGValidationError", "line_number": 62, "usage_type": "name"}, {"api_name": "gcg.utils.make_json_response", "line_number": 63, "usage_type": "call"}, {"api_name": "http.client.CONFLICT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "426224354", "text": "from django.shortcuts import render\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom api.models import *\nfrom api.serializers import *\n\n# Create your views here.\n\n@api_view(['GET'])\ndef apiOverview(request):\n\tapi_urls = {\n\t\t'api overview' : '',\n\t\t'create' : '/task-create/',\n\t\t'list' : '/task-list/',\n\t\t'detail view' : '/task-list/',\n\t\t'update' : '/task-update/',\n\t\t'delete' : '/task-delete/',\n\t}\n\n\treturn Response(api_urls)\n\n@api_view(['GET'])\ndef showAllTasks(request):\n\ttasks = todo.objects.all()\n\tserializer = todoSerializer(tasks, many = True)\n\treturn Response(serializer.data)\n\n@api_view(['GET'])\ndef showTask(request, pk):\n\ttasks = todo.objects.get(pk=pk)\n\tserializer = todoSerializer(tasks, many = False)\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef createTask(request):\n\tserializer = todoSerializer(data = request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef updateTask(request, pk):\n\ttask = todo.objects.get(pk=pk)\n\tserializer = todoSerializer(instance = task, data = request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\t\n\n@api_view(['DELETE'])\ndef deleteTask(request, pk):\n\ttask = todo.objects.get(pk=pk)\n\ttask.delete()\n\t\n\treturn Response('Item deleted succesfully!')\n\n", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "rest_framework.response.Response", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 43, "usage_type": "call"}, {"api_name": 
"rest_framework.decorators.api_view", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "50745462", "text": "import runWorld as rw\nimport drawWorld as dw\nimport pygame as pg\nfrom random import randint\n################################################################\n\n# Initialize world\nname = \"Cat Fun. Press the mouse (but not too fast)!\"\nwidth = 1000\nheight = 700\nrw.newDisplay(width, height, name)\n\n################################################################\n\nmyimage = dw.loadImage(\"realball.bmp\")\nsecondimage = dw.loadImage(\"bballhoop.bmp\")\n\n# state -> image (IO)\ndef updateDisplay(state):\n dw.fill(dw.black)\n dw.draw(myimage, (state[0], state[2]))\n dw.draw(secondimage, (750, state[4]))\n\n\n################################################################\n\n# state -> state\ndef updateState(state):\n return(state[0]+state[1], state[1], state[2]+state[3], state[3], state[4] + state[5], state[5])\n\n################################################################\n\n# state -> bool\ndef endState(state):\n if (state[0] > width or state[0] < 0) or (state[2] > height or state[2] < 0):\n return True, print(\"Hoop Wins!\")\n if state[4] > height or state[4] < 0:\n return True, print(\"Ball Wins!\")\n if (((state[4] - 100) < state[2] < (state[4] + 100)) and (state[0] == 750)):\n return True, print(\"Ball Wins!\")\n else:\n return False\n################################################################\n\n# state -> event -> state\ndef handleEvent(state, event): \n #print(\"Handling event: \" + str(event))\n if (event.type == pg.KEYDOWN):\n if (event.key == pg.K_UP):\n newState3 = state[3] - 1\n return(state[0], state[1], state[2], newState3, state[4], state[5])\n if (event.key == pg.K_DOWN):\n newState3 = state[3] + 1\n return(state[0], state[1], state[2], newState3, state[4], state[5])\n if (event.key == pg.K_w):\n newState5 = state[5] - 1\n return(state[0], state[1], state[2], state[3], state[4], newState5)\n if (event.key == pg.K_s):\n newState5 = state[5] + 1\n return(state[0], state[1], state[2], state[3], state[4], newState5)\n else:\n return(state)\n else:\n return(state)\n\n################################################################\n\ninitState = ((randint (125, 375)), (randint(1, 3)), (randint (125,\n375)), (randint(1, 3)), height/2, (randint(1, 3)))\n\n# Run the sixmulation no faster than 60 frames per second\nframeRate=30\n\n# Run the simulation!\nrw.runWorld(initState, updateDisplay, updateState, handleEvent,\n endState, frameRate)\n", "sub_path": "CatFun.py", "file_name": "CatFun.py", "file_ext": "py", "file_size_in_byte": 2531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "runWorld.newDisplay", "line_number": 11, "usage_type": "call"}, {"api_name": "drawWorld.loadImage", "line_number": 15, "usage_type": "call"}, {"api_name": "drawWorld.loadImage", "line_number": 16, "usage_type": "call"}, {"api_name": "drawWorld.fill", "line_number": 20, "usage_type": "call"}, {"api_name": "drawWorld.black", "line_number": 20, "usage_type": "attribute"}, {"api_name": "drawWorld.draw", "line_number": 21, "usage_type": 
"call"}, {"api_name": "drawWorld.draw", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 58, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 68, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 69, "usage_type": "call"}, {"api_name": "runWorld.runWorld", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "371430782", "text": "import numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport librosa\nimport librosa.display\nfrom scipy import signal\nfrom scipy.fftpack.realtransforms import dct\n\n\ndef hz_mel(f):\n \"\"\"\n helz => mel scale\n \"\"\"\n return 2595 * np.log(f / 700.0 + 1.0)\n\n\ndef mel_hz(m):\n \"\"\"\n mel scale => helz\n \"\"\"\n return 700 * (np.exp(m / 2595) - 1.0)\n\n\ndef melFilterBank(fs, N, n_Channels):\n \"\"\"\n create melFilterBank\n\n Parameters\n ----------\n fs : sampling frequency\n N : number of samples of FFT\n n_Channel : number of filters\n\n returns\n -------\n filterbank\n \"\"\"\n # Nyquist frequency(Hz)\n fmax = fs / 2\n # Nyquist frequency(mel)\n melmax = hz_mel(fmax)\n # max index of frequency\n nmax = N // 2\n # frequency resolution\n df = fs / N\n # the center frequency of each filter no Mel scale\n dmel = melmax / (n_Channels + 1)\n melcenters = np.arange(1, n_Channels + 1) * dmel\n # convert the center frequency of each filter to frequency\n fcenters = mel_hz(melcenters)\n # convert to index of frequency\n indexcenter = np.round(fcenters / df)\n # index of the start position of each filter\n indexstart = np.hstack(([0], indexcenter[0 : n_Channels - 1]))\n # index of the stop position of each filter\n indexstop = np.hstack((indexcenter[1:n_Channels], [nmax]))\n filterbank = np.zeros((n_Channels, nmax))\n for c in range(0, n_Channels):\n increment = 1.0 / (indexcenter[c] - indexstart[c])\n for i in range(int(indexstart[c]), int(indexcenter[c])):\n filterbank[c, i] = (i - indexstart[c]) * increment\n decrement = 1.0 / (indexstop[c] - indexcenter[c])\n for i in range(int(indexcenter[c]), int(indexstop[c])):\n filterbank[c, i] = 1.0 - ((i - indexcenter[c]) * decrement)\n\n return filterbank\n\n\ndef gene_mfcc(s, fs, nperseg, filterbank):\n f, t, spec = signal.stft(s, fs=fs, nperseg=nperseg)\n mspec = np.dot(filterbank, np.abs(spec[:-1]))\n mspec_db = librosa.amplitude_to_db(mspec)\n ceps = dct(mspec_db, axis=0)\n mfcc = ceps[1:13]\n return spec, mspec_db, mfcc\n\n\ndef main():\n args = sys.argv\n wav_filename = args[1]\n s, fs = librosa.load(wav_filename)\n N = 2048\n\n n_Channels = 20\n filterbank = melFilterBank(fs, N, n_Channels)\n\n spec, mspec_db, mfcc = gene_mfcc(s, fs, N, filterbank)\n\n librosa.display.specshow(mfcc, sr=fs, x_axis=\"time\", y_axis=\"log\")\n plt.colorbar()\n plt.title(\"mfcc\")\n plt.savefig(\"mfcc\")\n plt.clf\n plt.close\n\n librosa.display.specshow(\n librosa.amplitude_to_db(spec), sr=fs, x_axis=\"time\", y_axis=\"log\"\n )\n plt.colorbar()\n plt.title(\"spectrogram\")\n plt.savefig(\"spec\")\n plt.clf\n plt.close\n\n librosa.display.specshow(mspec_db, sr=fs, x_axis=\"time\", y_axis=\"log\")\n plt.colorbar()\n plt.title(\"mel-spectrogram\")\n plt.savefig(\"melspec\")\n plt.clf\n plt.close\n\n\nif __name__ == \"__main__\":\n main()\n", 
"sub_path": "ex_5/hashizume/EX5_m.py", "file_name": "EX5_m.py", "file_ext": "py", "file_size_in_byte": 2955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.log", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.signal.stft", "line_number": 70, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 71, "usage_type": "call"}, {"api_name": "librosa.amplitude_to_db", "line_number": 72, "usage_type": "call"}, {"api_name": "scipy.fftpack.realtransforms.dct", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 79, "usage_type": "attribute"}, {"api_name": "librosa.load", "line_number": 81, "usage_type": "call"}, {"api_name": "librosa.display.specshow", "line_number": 89, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 89, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 93, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 94, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "librosa.display.specshow", "line_number": 96, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 96, "usage_type": "attribute"}, {"api_name": "librosa.amplitude_to_db", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 102, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 103, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "librosa.display.specshow", "line_number": 105, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 105, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 106, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 110, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "624252407", "text": "import torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport os\r\nimport torch.utils.data as Data\r\nimport torchvision\r\n\r\n\r\nprint(torch.cuda.is_available())\r\nprint(torch.cuda.device_count())\r\nprint(torch.cuda.get_device_name())\r\n\r\nEpoch = 50\r\nBatch_Size = 1024\r\nLR = 5e-3\r\nRestore_net = 0 # 0: not restore; 1:restore net; 2:restore params;\r\nFreeze = False\r\nGamma = 0.95\r\n\r\n# define Dataload class\r\nclass LoadData():\r\n def dataload(self, root='./mnist', train=True, download=True):\r\n train_sets = torchvision.datasets.MNIST(\r\n root=root,\r\n train=train, #if True: trainsets, False: testsets;\r\n transform=torchvision.transforms.ToTensor(), # [0-255] to [0-1]\r\n download=download\r\n )\r\n train_data = Data.DataLoader(dataset=train_sets, batch_size=Batch_Size, shuffle=True, num_workers=0)\r\n return train_data\r\n\r\n\r\n# define network\r\nclass CNN(nn.Module):\r\n def __init__(self):\r\n super(CNN, self).__init__()\r\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=24, kernel_size=5, padding=2) #(1,28,28)\r\n self.Re1 = nn.ReLU()\r\n self.M1 = nn.MaxPool2d(kernel_size=2, stride=2) #(24,14,14)\r\n\r\n self.conv2 = nn.Conv2d(in_channels=24, out_channels=48, kernel_size=5, padding=2)\r\n self.Re2 = nn.ReLU()\r\n self.M2 = nn.MaxPool2d(2, 2) #(48,7,7)\r\n\r\n self.conv3 = nn.Conv2d(in_channels=48, out_channels=64, kernel_size=5, padding=2)\r\n self.Re3 = nn.ReLU()\r\n self.M3 = nn.MaxPool2d(2, 2) #(64,3,3)\r\n\r\n self.Fc = nn.Linear(64 * 3 * 3, 10)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.Re1(x)\r\n x = self.M1(x)\r\n\r\n x = self.conv2(x)\r\n x = self.Re2(x)\r\n x = self.M2(x)\r\n\r\n x = self.conv3(x)\r\n x = self.Re3(x)\r\n x = self.M3(x)\r\n x = x.view(x.size(0), -1)\r\n\r\n x = self.Fc(x)\r\n return x\r\n\r\n\r\nif Restore_net == 1:\r\n net = torch.load('net.pkl')\r\nelif Restore_net == 2:\r\n net = CNN()\r\n net.load_state_dict(torch.load('net_params.pkl'))\r\nelse:\r\n net = CNN()\r\n\r\nnet = net.cuda(0)\r\n\r\nprint(net)\r\n\r\n# define optimizer\r\noptimizer = torch.optim.Adam(params=net.parameters(), lr=LR)\r\nlr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=1, gamma=Gamma)\r\n\r\n# define loss function\r\nloss_func = nn.CrossEntropyLoss()\r\n\r\n# load traindata and test data\r\ntrain_data = LoadData().dataload(root='./mnist', train=True, download=True)\r\ntest_data = torchvision.datasets.MNIST(root='./mnist/', train=False)\r\nwith torch.no_grad():\r\n test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1)).type(torch.FloatTensor)/255.\r\n test_y = test_data.test_labels\r\n\r\n# train and Test\r\nfor epoch in range(Epoch):\r\n\r\n # Train\r\n ACC = 0\r\n Loss = 0\r\n if Freeze:\r\n 
break\r\n\r\n for step, (batch_x, batch_y) in enumerate(train_data):\r\n x = Variable(batch_x).cuda(0)\r\n y = Variable(batch_y).cuda(0)\r\n output = net(x)\r\n loss = loss_func(output, y)\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n with torch.no_grad():\r\n equal = torch.eq(torch.max(output, 1)[1], y)\r\n accuracy = torch.mean(equal.float())\r\n ACC += accuracy.item()\r\n Write_acc = ACC / (step + 1)\r\n Loss += loss.item()\r\n Write_Loss = Loss / (step + 1)\r\n print(f\"Epoch={epoch} | loss={Write_Loss} | lr={optimizer.param_groups[0]['lr']} | Train ACC={Write_acc}\")\r\n\r\n # test\r\n x = test_x.cuda(0)\r\n test_output = net(x)\r\n pred_y = torch.max(test_output.to('cpu'), 1)[1].data.numpy()\r\n accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))\r\n print(f'Epoch={epoch} | test ACC: {accuracy}')\r\n\r\n # save train acc and loss in txt\r\n trainACCTXT = open(\"./Train_Acc.txt\", 'a')\r\n trainACCTXT.write(str(Write_acc))\r\n trainACCTXT.write('\\n')\r\n trainACCTXT.close()\r\n trainLossTXT = open(\"./Train_Loss.txt\", 'a')\r\n trainLossTXT.write(str(Write_Loss))\r\n trainLossTXT.write('\\n')\r\n trainLossTXT.close()\r\n # save test acc\r\n trainACCTXT = open(\"./Test_Acc.txt\", 'a')\r\n trainACCTXT.write(str(accuracy))\r\n trainACCTXT.write('\\n')\r\n trainACCTXT.close()\r\n\r\n # save model\r\n if Write_acc == 1.0:\r\n torch.save(net, 'net.pkl') # both save net and params\r\n torch.save(net.state_dict(), 'net_params.pkl') # only save params\r\n Freeze = True\r\n lr_scheduler.step()\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "CNN.py", "file_name": "CNN.py", "file_ext": "py", "file_size_in_byte": 4558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.cuda.is_available", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.cuda.get_device_name", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 42, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 90, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "519619628", "text": "from django.db import migrations, transaction\n\nSIZE_TO_COST = {\n \"sml\": (10.00, 4.99),\n \"med\": (15.00, 5.99),\n \"lrg\": (20.00, 7.99),\n}\n\n\ndef import_sample_data(apps, schema_editor):\n Prints = apps.get_model(\"checkout\", \"Prints\")\n with transaction.atomic():\n for size, (print_cost, shipping_cost) in SIZE_TO_COST.items():\n item = Prints(\n size=size, print_cost=print_cost, shipping_cost=shipping_cost,\n )\n item.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"checkout\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.RunPython(import_sample_data),\n ]\n", "sub_path": "checkout/migrations/0002_import_sample_data.py", "file_name": "0002_import_sample_data.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.transaction.atomic", "line_number": 12, "usage_type": "call"}, {"api_name": 
"django.db.transaction", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.migrations.Migration", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.migrations.RunPython", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "3663096", "text": "from flask import request\nfrom webargs.flaskparser import use_args\nfrom webargs import fields, validate\n\nfrom sqlalchemy import create_engine,func\n\nimport marshmallow\nfrom marshmallow import post_dump\n\nfrom flask_restful import Resource\nfrom sqlalchemy.exc import IntegrityError\nfrom app.models import Bill,Session, db,ma\nfrom app.resources.utils import custom_error, ErrorCode\n\nfrom app.resources.auth import requires_auth,requires_admin\n\nimport json\n\nclass BillSchema(ma.SQLAlchemyAutoSchema):\n\n class Meta:\n model = Bill\n # Fields to be included in the output\n fields = ('id','user_id','period_start_date','period_end_date','total_cost','is_paid')\nbill_schema =BillSchema()\n\ndef update_bills(user_id):\n #sessions = Session.query.filter(Session.user_id==user_id).order_by(Session.starting_time)\n\n bills = Bill.query.filter(Bill.user_id ==user_id).order_by(Bill.period_start_date.desc())\n\n\n last_bill = bills.first()\n last_bill_date = '0000-00-00 00:00:00'\n\n if last_bill is not None:\n last_bill_date = last_bill.period_end_date\n '''\n engine = create_engine('sqlite:///app.db')\n con = engine.connect()\n rs = con.execute(\"select sum( (CAST( substr(finishing_time,12,2) AS INTEGER)-CAST(substr(starting_time,12,2) AS INTEGER))*kwh_cost ) as cost ,substr(starting_time,0,8) as month from session where user_id = ? and substr(starting_time,0,8)>? 
group by substr(starting_time,0,8) ;\",(user_id,last_bill_date))\n '''\n rs = db.session.query( func.sum(Session.kwh_delivered*Session.kwh_cost),func.substr(Session.starting_time,0,8)).group_by(func.substr(Session.starting_time,0,8)).filter(Session.user_id == user_id,Session.starting_time>last_bill_date)\n\n\n\n for r in rs:\n #print(r[0],r[1])\n \n b = Bill(\n user_id = user_id, \n period_start_date = str(r[1])+'-00',\n period_end_date = str(r[1])+'-31',\n total_cost = r[0],\n is_paid = False\n )\n\n print(b)\n\n db.session.add(b)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n return custom_error('some sql error',[str(e._message)])\n \n \n \n\n\n\n \n\nclass BillResource(Resource):\n @requires_auth\n @use_args({\n 'user_id':fields.Int(required=True)\n },location = 'query')\n def get(self,args,token,is_admin):\n update_bills(args['user_id'])\n query = Bill.query\n res = query.filter(Bill.user_id == args['user_id']).order_by(Bill.period_start_date.desc())\n total = res.count()\n\n return{\n \"total\":total,\n \"bills\":bill_schema.dump(res.all(),many=True)\n }\n\n @requires_auth\n @use_args({\n 'bill_id':fields.Int(required=True)\n },location='query')\n def put(self,args,token,is_admin):\n \n b = Bill.query.filter(Bill.id == args['bill_id']).first()\n if b.is_paid: \n return{\n \"message\":\"bill already paid\"\n }\n\n b.is_paid = True\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n return custom_error('some sql error',[str(e._message)])\n\n return {\n 'message': 'OK' \n } \n ", "sub_path": "backend/app/resources/bill.py", "file_name": "bill.py", "file_ext": "py", "file_size_in_byte": 3309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "app.models.ma.SQLAlchemyAutoSchema", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.models.ma", "line_number": 19, "usage_type": "name"}, {"api_name": "app.models.Bill", "line_number": 22, "usage_type": "name"}, {"api_name": "webargs.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "app.models.Bill.query.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "app.models.Bill.query", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.models.Bill", "line_number": 30, "usage_type": "name"}, {"api_name": "app.models.Bill.user_id", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.models.Bill.period_start_date.desc", "line_number": 30, "usage_type": "call"}, {"api_name": "app.models.Bill.period_start_date", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.models.db.session.query", "line_number": 43, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 43, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 43, "usage_type": "name"}, {"api_name": "sqlalchemy.func.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 43, "usage_type": "name"}, {"api_name": "app.models.Session.kwh_delivered", "line_number": 43, "usage_type": "attribute"}, {"api_name": "app.models.Session", "line_number": 43, "usage_type": "name"}, {"api_name": "app.models.Session.kwh_cost", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.substr", "line_number": 43, "usage_type": "call"}, {"api_name": "app.models.Session.starting_time", "line_number": 43, "usage_type": "attribute"}, {"api_name": "app.models.Session.user_id", "line_number": 43, 
"usage_type": "attribute"}, {"api_name": "app.models.Bill", "line_number": 50, "usage_type": "call"}, {"api_name": "app.models.db.session.add", "line_number": 60, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 60, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 60, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 62, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 62, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 62, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 63, "usage_type": "name"}, {"api_name": "app.models.db.session.rollback", "line_number": 64, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 64, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 64, "usage_type": "name"}, {"api_name": "app.resources.utils.custom_error", "line_number": 65, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 74, "usage_type": "name"}, {"api_name": "app.models.Bill.query", "line_number": 81, "usage_type": "attribute"}, {"api_name": "app.models.Bill", "line_number": 81, "usage_type": "name"}, {"api_name": "app.models.Bill.user_id", "line_number": 82, "usage_type": "attribute"}, {"api_name": "app.models.Bill", "line_number": 82, "usage_type": "name"}, {"api_name": "app.models.Bill.period_start_date.desc", "line_number": 82, "usage_type": "call"}, {"api_name": "app.models.Bill.period_start_date", "line_number": 82, "usage_type": "attribute"}, {"api_name": "app.resources.auth.requires_auth", "line_number": 75, "usage_type": "name"}, {"api_name": "webargs.flaskparser.use_args", "line_number": 76, "usage_type": "call"}, {"api_name": "webargs.fields.Int", "line_number": 77, "usage_type": "call"}, {"api_name": "webargs.fields", "line_number": 77, "usage_type": "name"}, {"api_name": "app.models.Bill.query.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "app.models.Bill.query", "line_number": 96, "usage_type": "attribute"}, {"api_name": "app.models.Bill", "line_number": 96, "usage_type": "name"}, {"api_name": "app.models.Bill.id", "line_number": 96, "usage_type": "attribute"}, {"api_name": "app.models.db.session.commit", "line_number": 104, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 104, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 104, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 105, "usage_type": "name"}, {"api_name": "app.models.db.session.rollback", "line_number": 106, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 106, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 106, "usage_type": "name"}, {"api_name": "app.resources.utils.custom_error", "line_number": 107, "usage_type": "call"}, {"api_name": "app.resources.auth.requires_auth", "line_number": 90, "usage_type": "name"}, {"api_name": "webargs.flaskparser.use_args", "line_number": 91, "usage_type": "call"}, {"api_name": "webargs.fields.Int", "line_number": 92, "usage_type": "call"}, {"api_name": "webargs.fields", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "341884780", "text": "\"\"\"This module calculates the most frequent words in a folder of books,\nwhich are in a specific xml-format.\"\"\"\n#!/usr/bin/env python3\n# coding: utf-8\n#PCL II, Übung 4, FS17\n#Raphael Kälin 14-727-010\n#Aufgabe 1.2\n\nimport glob\nimport 
xml.etree.ElementTree as ET\n\ndef getfreqwords(indir, outfile):\n \"\"\"\n This method calculates the most frequent sentences in a folder of books,\n which are in a specific xml-format.\n \"\"\"\n\n # Dictionary with hash number of each sentence as key and frequency as value. \n hashed_freq_sentence_dict = {}\n\n # Goes through each file in certain pattern in given directory.\n for xml_filename in glob.glob(indir + '/*mul.xml'):\n\n # Opens the current xml-file with utf-8 encoding.\n with open(xml_filename, encoding='utf-8') as current_file:\n\n # Creates a tree of the xml-file\n tree = ET.parse(current_file)\n root = tree.getroot()\n\n for article in root:\n for div_tag in article:\n # Considers only the lines with the <s>-tag\n for sentence in div_tag.findall('s'):\n lemma_sentence = lemmatize_sentence(sentence)\n\n sentence_hash = hash(lemma_sentence)\n # If hashed sentence not in dictionary and if the sentence has at least 6 words.\n if sentence_hash not in hashed_freq_sentence_dict and len(sentence.findall('w')) >= 6:\n hashed_freq_sentence_dict[sentence_hash] = 1\n # If hashed sentence is already in dictionary.\n elif sentence_hash in hashed_freq_sentence_dict:\n hashed_freq_sentence_dict[sentence_hash] += 1\n # If hashed sentence has less than 6 words.\n else:\n pass\n\n # List of tuples (hashed_lemma, frequency), sorted by the frequency in ascending order.\n tuple_list = sorted(hashed_freq_sentence_dict.items(), key=lambda x: x[1])\n # Considers only the last 20 elements.\n tuple_list = tuple_list[-20:]\n\n###############################################################################\n# Goes again with the list of tuples (in which are the most frequent sentences\n# in form of their hash representation and corresponding frequency) through\n# all the sentences of the xml-files and compares them. 
If a hash representation\n# of a sentence matches with the lemma sentence, then it adds the lemma sentence\n# with its frequency to the final_dict.\n###############################################################################\n\n # Dictionary with the format: key = lemma_sentence, value = frequency.\n final_dict = {}\n\n # Goes through each file in certain pattern in given directory.\n for xml_filename in glob.glob(indir + '/*mul.xml'):\n\n # Opens the current xml-file with utf-8 encoding.\n with open(xml_filename, encoding='utf-8') as current_file:\n\n # Creates a tree of the xml-file\n tree = ET.parse(current_file)\n root = tree.getroot()\n\n for article in root:\n for div_tag in article:\n # Considers only the lines with the <s>-tag\n for sentence in div_tag.findall('s'):\n lemma_sentence = lemmatize_sentence(sentence)\n\n # If the lemma_sentence matches with one element of the tuple_list.\n if hash(lemma_sentence) in [item[0] for item in tuple_list]:\n\n # Deletes the element in tuple_list after it was found and added.\n for i, temp_hash in enumerate(tuple_list):\n if temp_hash[0] == hash(lemma_sentence):\n # Adds the lemma sentence and its frequency to the final dictionary.\n final_dict[lemma_sentence] = tuple_list[i][1]\n del tuple_list[i]\n\n\n # List of tuples (lemma_sentence, frequency), sorted by the frequency in ascending order.\n final_sentence_list_sorted = sorted(final_dict.items(), key=lambda x: x[1])\n\n # Formats and fills lemma sentences and their frequency into final_string.\n final_string = ''\n for element_tuple in reversed(final_sentence_list_sorted):\n final_string += str(element_tuple[0]) + '--> ' + str(element_tuple[1]) + '\\n'\n\n # Writes final string into the output file.\n with open(outfile, 'w', encoding='utf-8') as output_file:\n output_file.write(final_string)\n\n\ndef lemmatize_sentence(sentence):\n \"\"\"\n Takes a sentence as input and creates a lemmatized sentence by combining each lemmatized word.\n \"\"\"\n sentence_lemma = ''\n\n for word in sentence.findall('w'):\n try:\n sentence_lemma += word.attrib['lemma'] + ' '\n \n # Example: If it has a sentence of Swiss German, in which the words have no lemma.\n except KeyError:\n sentence_lemma += word.text\n\n return sentence_lemma\n\n\ndef main():\n \"\"\"\n Executes the getfreqwords method with a given directory.\n \"\"\"\n getfreqwords('SAC', 'output.txt')\n \n# Executes the main function\nif __name__ == '__main__':\n main()\n", "sub_path": "Aufgabe1/aufgabe1.py", "file_name": "aufgabe1.py", "file_ext": "py", "file_size_in_byte": 5308, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "glob.glob", "line_number": 22, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 28, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 28, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 65, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 71, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "450403051", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport gym\nimport gym_bandits\nimport matplotlib.patches as mpatches\n\n\ndef main():\n # Number of bandits\n num_of_bandits = 10\n\n # For each episode we will run these many iterations\n iterations = 850\n episodes = 1000\n\n # Create environment - Gaussian Distribution\n env = 
gym.make('BanditTenArmedGaussian-v0')\n\n # Run all episodes\n ubc_rewards = run_ucb(env, num_of_bandits,iterations,episodes)\n \n plt.figure(figsize=(12, 8))\n plt.plot(ubc_rewards, color='blue')\n plt.legend(bbox_to_anchor=(1.2, 0.5))\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Average Reward\")\n greedy_patch = mpatches.Patch(color='blue', label='Upper Confidence Bounds')\n plt.legend(handles=[greedy_patch])\n plt.title(\"Average Rewards after \"\n + str(episodes) + \" Episodes\")\n plt.show()\n\n\ndef run_ucb(env, num_of_bandits, iterations, episodes):\n \"\"\"\n This method will run all the episodes with Upper Confidence Bound greedy strategy\n :param env: Bandit Gym Environment\n :param num_of_bandits: Number of bandit arms\n :param iterations: Iterations per episode\n :param episodes: Number of episodes\n :return: Array of length equal to number of episodes having mean reward per episode\n \"\"\"\n\n # Initialize total mean rewards array per episode by zero\n ubc_rewards = np.zeros(iterations)\n \n for i in range(episodes):\n print(f\"Running UCB episode:{i}\")\n\n n = 1\n action_count_per_bandit = np.ones(num_of_bandits)\n mean_reward = 0\n total_rewards = np.zeros(iterations)\n mean_reward_per_bandit = np.zeros(num_of_bandits)\n env.reset()\n c = 1\n\n for j in range(iterations):\n a = get_ucb_action(mean_reward_per_bandit, c, n, action_count_per_bandit)\n\n observation, reward, done, info = env.step(a)\n\n # Update counts\n n += 1\n action_count_per_bandit[a] += 1\n\n # Update mean rewards\n mean_reward = mean_reward + (\n reward - mean_reward) / n\n\n # Update mean rewards per bandit\n mean_reward_per_bandit[a] = mean_reward_per_bandit[a] + (\n reward - mean_reward_per_bandit[a]) / action_count_per_bandit[a]\n\n # Capture mean rewards per iteration\n total_rewards[j] = mean_reward\n\n ubc_rewards = ubc_rewards + (total_rewards - ubc_rewards) / (i + 1)\n\n return ubc_rewards\n\n\ndef get_ucb_action(mean_reward_per_bandit, c, n, action_count_per_bandit):\n return np.argmax(mean_reward_per_bandit + c * np.sqrt(\n (np.log(n)) / action_count_per_bandit))\n\nif __name__ == \"__main__\":\n main()", "sub_path": "code/multi_armed_bandits_ucb.py", "file_name": "multi_armed_bandits_ucb.py", "file_ext": "py", "file_size_in_byte": 2722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "gym.make", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "72357333", "text": "# -*- coding: utf-8 -*-\n\nfrom urllib import quote, urlencode\n\nfrom django.test import TestCase\nfrom django.test.client import Client\n\nfrom django.core.urlresolvers import reverse\nfrom django.template import Template, Context\n\nfrom external_links.models import LinkClick\nfrom external_links.templatetags.external_link_tags import ExternalLink\n\nDESTINATION = 'http://example.com/?param=val¶m2=val2'\n\nclass ExternalLinkTest(TestCase):\n \"\"\"\n Test External link\n \"\"\"\n\n def test_view(self):\n clicks_count = LinkClick.objects.filter(link=DESTINATION).count()\n client = Client()\n external_url = reverse('external_link')\n client.get(external_url, {'link': DESTINATION}, follow=True)\n clicks_new_count = LinkClick.objects.filter(link=DESTINATION).count()\n self.assertEqual(clicks_new_count - clicks_count, 1)\n\n def test_badrequestip(self):\n \"\"\"\n If we get a request addr that's not an IP, we don't want to \"crash\"\n \"\"\"\n clicks_count = LinkClick.objects.filter(link=DESTINATION).count()\n client = Client(REMOTE_ADDR=\"notanip\")\n external_url = reverse('external_link')\n client.get(external_url, {'link': DESTINATION}, follow=True)\n clicks_new_count = LinkClick.objects.filter(link=DESTINATION).count()\n self.assertEqual(clicks_new_count - clicks_count, 1)\n\n def test_ttag(self):\n ctx = Context()\n template = Template('{%% load external_link_tags %%}'\n '{%% external \"%s\" %%}' % DESTINATION)\n external_url = reverse('external_link')\n params = urlencode({'link': DESTINATION})\n self.assertEqual(template.render(ctx), external_url + '?' + params)\n\n def test_blocktag(self):\n external_link = ExternalLink([])\n base = 'link1: hey, hoho wee'\n\n original_text = base % {\n 'link1': DESTINATION,\n 'link2': DESTINATION\n } \n\n external_url = reverse('external_link')\n params = urlencode({'link': DESTINATION})\n\n final_dest = external_url + '?' + params\n final_text = base % {\n 'link1': final_dest,\n 'link2': final_dest,\n }\n self.assertEqual(final_text, \n external_link.replace_links(original_text))\n\n def test_blocktag_notalink(self):\n \"\"\"\n Another test of replacing a block of text. Except this one features several \"http://\" strings that shouldnt\n get mangled.\n \"\"\"\n external_link = ExternalLink([])\n base = 'http://hellodontescapeme.com ' \\\n 'http://dontgetmeeither.com/anothertrick'\n\n original_text = base % {\n 'link1': DESTINATION,\n }\n\n external_url = reverse('external_link')\n params = urlencode({'link': DESTINATION})\n\n final_dest = external_url + '?' 
+ params\n final_text = base % {\n 'link1': final_dest,\n }\n self.assertEqual(final_text,\n external_link.replace_links(original_text))\n", "sub_path": "external_links/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 3137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 16, "usage_type": "name"}, {"api_name": "external_links.models.LinkClick.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "external_links.models.LinkClick.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "external_links.models.LinkClick", "line_number": 22, "usage_type": "name"}, {"api_name": "django.test.client.Client", "line_number": 23, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 24, "usage_type": "call"}, {"api_name": "external_links.models.LinkClick.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "external_links.models.LinkClick.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "external_links.models.LinkClick", "line_number": 26, "usage_type": "name"}, {"api_name": "external_links.models.LinkClick.objects.filter", "line_number": 33, "usage_type": "call"}, {"api_name": "external_links.models.LinkClick.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "external_links.models.LinkClick", "line_number": 33, "usage_type": "name"}, {"api_name": "django.test.client.Client", "line_number": 34, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "external_links.models.LinkClick.objects.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "external_links.models.LinkClick.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "external_links.models.LinkClick", "line_number": 37, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 41, "usage_type": "call"}, {"api_name": "django.template.Template", "line_number": 42, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 45, "usage_type": "call"}, {"api_name": "external_links.templatetags.external_link_tags.ExternalLink", "line_number": 49, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 57, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 58, "usage_type": "call"}, {"api_name": "external_links.templatetags.external_link_tags.ExternalLink", "line_number": 73, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 81, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "114856515", "text": "# coding: UTF-8\nfrom cssselect import GenericTranslator, SelectorError\n# import lxml.html.soupparser as soupparser\nimport lxml.etree as etree\nimport time\nimport datetime\nfrom calendar import isleap\n\n\ndef gen_days_list(start, end, gap, format):\n FMT = '%Y-%m-%d'\n month_days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if isleap(2016):\n month_days[2] = 29\n ret = []\n start_date = datetime.datetime(\n *time.strptime(time.strftime(start), FMT)[:6])\n end_date = datetime.datetime(*time.strptime(time.strftime(end), FMT)[:6])\n # if start_date <= end_date-datetime.timedelta(gap):\n while start_date <= end_date:\n 
ret.append(start_date.strftime(format))\n start_date = start_date + datetime.timedelta(gap)\n # print ret\n return ret\n\n# gap>=1\n\n\ndef format_end(start, format, gap):\n start_date = datetime.datetime(\n *time.strptime(time.strftime(start), format)[:6])\n end_date = start_date + datetime.timedelta(gap - 1)\n return end_date.strftime(format)\n\n\ndef html_parse(doc, css_selector, attr):\n try:\n x_expr = GenericTranslator().css_to_xpath(css_selector)\n dom_eles = doc.xpath(x_expr)\n if attr == 'text':\n ret = [e.text for e in doc.xpath(x_expr)]\n else:\n ret = [e.get(attr) for e in doc.xpath(x_expr)]\n # for each in ret:\n # print(each)\n return ret\n except SelectorError:\n print('Invalid selector.')\n\n\ndef domfy(html):\n return etree.HTML(html)\n", "sub_path": "monkent/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "calendar.isleap", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 17, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 18, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 32, "usage_type": "call"}, {"api_name": "cssselect.GenericTranslator", "line_number": 38, "usage_type": "call"}, {"api_name": "cssselect.SelectorError", "line_number": 47, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 52, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "522734047", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\nimport re\n\nPRINT_PATHS=False\n\nclass Indexer:\n def __init__(self):\n self.all_paths = []\n pass\n\n def initDir(self, searchdir):\n\n searchdir = Path(searchdir)\n print(searchdir)\n self.all_paths = list(Path(searchdir).glob(str(\"**/*\")))\n print(\"init dir: {0} {1}\".format(searchdir, len(self.all_paths)))\n\n\n def findFilesFast(self, extensions, searchdir=Path(\".\"), rootdir=None, skip_paths=[], additional_searchexprs = None):\n searchdir = Path(searchdir)\n\n if rootdir == None:\n rootdir = searchdir\n\n rootdir = Path(rootdir)\n\n if not isinstance(extensions, list):\n extensions = [extensions]\n\n if additional_searchexprs is None:\n additional_searchexprs = []\n elif not isinstance(additional_searchexprs, list):\n additional_searchexprs = [additional_searchexprs]\n\n searchexprs = [\".*{0}$\".format(re.escape(extension)) for extension in extensions]\n searchexprs += additional_searchexprs\n\n paths = []\n for searchexpr in searchexprs:\n # print(extension)\n regex = re.compile(searchexpr, re.IGNORECASE)\n\n #new_paths = filter(regex.match, self.all_paths)\n #new_paths = list(new_paths)\n\n new_paths = []\n for path in self.all_paths:\n if regex.match(str(path)):\n new_paths += [path]\n\n new_paths = [path.relative_to(rootdir) for 
path in new_paths]\n new_paths = self.skipPaths(new_paths, skip_paths)\n if new_paths:\n paths += new_paths\n print(\"{0}:\\t\\t{1}\".format(searchexpr, len(new_paths)))\n if PRINT_PATHS:\n for path in new_paths:\n print(\"\\t{}\".format(path))\n\n paths.sort()\n return paths\n\n def toGlob(self, paths):\n if isinstance(paths, list):\n return [Path(path) for path in paths]\n else:\n return Path(paths)\n\n def skipPaths(self, paths, skiplist):\n # paths = self.toGlob(paths)\n new_paths = []\n for path in paths:\n append = True\n for skip_path in skiplist:\n # TODO re-examine sh path matching (prev: if path.match(skip_path))\n if skip_path.lower() in str(path).lower():\n append = False\n if append:\n new_paths.append(str(path))\n return new_paths\n\n\n def fileLocations(self, files):\n dirs = []\n for file in files:\n basename = str(Path(file).parent)\n if not basename in dirs:\n # print(basename)\n dirs.append(basename)\n dirs.sort()\n return dirs\n\n\n def listToFile(self, in_list, filename):\n with open(filename, \"w\") as f:\n print(\"saving into: {}\".format(filename))\n for item in in_list:\n f.write(\"%s\\n\" % item)\n\n\n def processProj( self,\n proj_name, proj_path, search_paths, skip_paths, include_extensions, other_extensions, additional_searchexprs=None\n ):\n ## projdir\n projdir = Path(proj_path)\n projdir_abs = Path(projdir).absolute()\n proj_name = Path(proj_name).name\n\n print(\"PROJ NAME: {0}, PATH: {1}\".format(proj_name, projdir_abs))\n\n\n include_files = []\n other_files = []\n for search_path in search_paths:\n self.initDir(projdir / search_path)\n include_files += self.findFilesFast(\n extensions=include_extensions,\n searchdir=projdir / search_path,\n rootdir=projdir,\n skip_paths=skip_paths,\n )\n other_files += self.findFilesFast(\n extensions=other_extensions,\n searchdir=projdir / search_path,\n rootdir=projdir,\n skip_paths=skip_paths,\n additional_searchexprs=additional_searchexprs,\n )\n\n files = include_files + other_files\n files.sort()\n\n include_dirs = self.fileLocations(include_files)\n\n self.listToFile(files, str(Path(proj_path) / \"{0}.files\".format(proj_name)))\n self.listToFile(include_dirs, str(Path(proj_path) / \"{0}.includes\".format(proj_name)))\n\n print(len(files))\n\n", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 4412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 38, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 68, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 70, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 89, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 108, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 109, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 110, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 138, "usage_type": "call"}, {"api_name":
"pathlib.Path", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "234633509", "text": "import re\nimport os\n\nfrom sqlite3 import connect, IntegrityError, Row\nfrom jinja2 import Template\nfrom config import db_path as config_path_db\nfrom logs import db_logger, debug_logger\n\n\nclass Column(object):\n \"\"\"\n Класс столбца таблицы\n \"\"\"\n\n def __init__(self, name, value=None, primary=False):\n self.name = name\n self.value = value\n self.primary = primary\n\n def set_value(self, new_value):\n self.value = new_value\n\n def get_value(self):\n return self.value\n\n def to_dict(self):\n return {self.name: self.value}\n\n def __str__(self):\n return f'[{self.name} = {self.value}]'\n\n def __repr__(self):\n return self.__str__()\n\n def __format__(self, format_spec):\n s = f'[{self.name} = {self.value}]'\n return f'{s: {format_spec[1:]}}'\n\n\nclass TableRow(object):\n \"\"\"\n Класс cтроки таблицы\n По сути - создает объект строки\n Также - служит формочкой\n \"\"\"\n _column_separator_width = 5\n __tablename__ = None\n row = []\n\n def to_dict(self):\n d = {column.name: column.value for column in self.row}\n return d\n\n def to_dict_without_primary(self):\n \"\"\"\n Возвращает словарь таблицы без первичного ключа\n :return:\n \"\"\"\n return {column.name: column.value for column in self.row if not column.primary}\n\n def get_primary(self):\n \"\"\"\n Возвращает первичный ключ\n :return:\n \"\"\"\n return {column.name: column.value for column in self.row if column.primary}\n\n @classmethod\n def __construct__(cls, values):\n \"\"\"\n Собирает строку переданного класса таблицы\n \"\"\"\n table = cls()\n my_row = dict(zip(table.row, values))\n for field, value in my_row.items():\n field.value = value\n return table\n\n @staticmethod\n def db_obj_to_dict(*args):\n return [column.to_dict() for column in args]\n\n def __repr__(self):\n table_str = f'<{self.__tablename__.title()}: '\n\n for field in self.row:\n table_str = f'{table_str} | {field: <{self._column_separator_width}}'\n return f'{table_str} >'\n\n def __eq__(self, other):\n for me, oth in zip(self.row, other.row):\n if me.name != oth.name or me.value != oth.value:\n return False\n else:\n return True\n\n\nclass Base(TableRow):\n \"\"\"\n Базовый класс для работы с таблицами.\n Позволяет добавлять, изменять, удалять данные из БД.\n \"\"\"\n db_path = config_path_db\n conn = connect(db_path)\n conn.row_factory = Row\n\n @staticmethod\n def get_template(filename):\n with open(os.path.join('app/models/static_sql', filename)) as template_file:\n sql = template_file.read()\n return Template(sql)\n\n def get_new_session(self):\n self.conn = connect(self.db_path)\n return self.conn.cursor()\n\n def insert_data(self):\n db_logger.info(self)\n template = self.get_template('insert_into.sql')\n row_dict = self.to_dict_without_primary()\n\n sql = template.render(tablename=self.__tablename__, fields=list(row_dict.keys()))\n debug_logger.info(sql)\n debug_logger.info(row_dict)\n\n try:\n self.get_new_session().execute(sql, row_dict)\n except IntegrityError as error:\n db_logger.error(f'Дубль по уникальному полю: {error}')\n raise\n finally:\n self.conn.commit()\n\n def delete_data(self):\n db_logger.info(self)\n\n template = self.get_template('delete_exp.sql')\n id_field = self.get_primary()\n where = self.kwargs_to_predicate_exp('and', **id_field)\n sql = template.render(table=self.__tablename__, where_expression=where)\n\n db_logger.info(sql)\n db_logger.info(id_field)\n\n self.get_new_session().execute(sql, id_field)\n 
db_logger.info(f'{self} deleted')\n self.conn.commit()\n\n @staticmethod\n def parse_constraint_fail(error):\n error_text = error.__repr__()[:-3]\n return re.findall(r'UNIQUE constraint failed: (\\S+)',\n error_text)\n\n def get_max_field_value(self, tablename, field):\n template = self.get_template('get_max_value.sql')\n sql = template.render(table=tablename, field=field)\n try:\n result = int(self.get_new_session().execute(sql).fetchall()[0][0])\n except TypeError:\n result = 0\n return result\n\n def select_expression(self, **kwargs):\n template = self.get_template('select_exp.sql')\n data = self.kwargs_to_predicate_exp('and', True, **kwargs)\n sql = template.render(table=self.__tablename__, data=data)\n db_logger.info(sql)\n db_logger.info(kwargs)\n result = self.get_new_session().execute(sql, kwargs).fetchall()\n result_list = []\n for result_row in result:\n result_list.append(self.__construct__(result_row))\n db_logger.info(result_list)\n return result_list\n\n def update_data(self):\n db_logger.info(self)\n template = self.get_template('update_exp.sql')\n id_field = self.row[0].to_dict()\n all_field = self.to_dict_without_primary()\n # Добавляем к общему словарю последний элемент - тот, что будет в предикате\n all_field.update(**id_field)\n where = self.kwargs_to_predicate_exp('and', True, **id_field)\n set_statement = self.kwargs_to_predicate_exp(',', **all_field)\n sql = template.render(table=self.__tablename__, set_expression=set_statement, where_expression=where)\n db_logger.info(sql)\n try:\n self.get_new_session().execute(sql, all_field)\n except IntegrityError as error:\n db_logger.error(f'Дубль по уникальному полю при апдейте: {error}')\n raise\n finally:\n self.conn.commit()\n db_logger.info(f'{self} updated')\n\n def kwargs_to_predicate_exp(self, separator, isnull=False, **kwargs):\n if len(kwargs) == 0: return '1 = 1'\n symbols = {True: 'is', False: '='}\n values = list(kwargs.keys())\n fields = list(kwargs.values())\n expression = str()\n pack = zip(fields, values)\n for value, field in pack:\n expression = f'{expression} {field} {symbols[isnull and value is None]} :{field} {separator}'\n expression = expression[:-len(separator)]\n return expression\n", "sub_path": "app/models/orm.py", "file_name": "orm.py", "file_ext": "py", "file_size_in_byte": 6830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "config.db_path", "line_number": 103, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 104, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 105, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "jinja2.Template", "line_number": 111, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 114, "usage_type": "call"}, {"api_name": "logs.db_logger.info", "line_number": 118, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 118, "usage_type": "name"}, {"api_name": "logs.debug_logger.info", "line_number": 123, "usage_type": "call"}, {"api_name": "logs.debug_logger", "line_number": 123, "usage_type": "name"}, {"api_name": "logs.debug_logger.info", "line_number": 124, "usage_type": "call"}, {"api_name": "logs.debug_logger", "line_number": 124, "usage_type": "name"}, {"api_name": "sqlite3.IntegrityError", "line_number": 128, "usage_type": "name"}, {"api_name": "logs.db_logger.error", "line_number": 129, 
"usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 129, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 135, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 135, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 142, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 142, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 143, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 143, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 146, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 146, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 152, "usage_type": "call"}, {"api_name": "logs.db_logger.info", "line_number": 168, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 168, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 169, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 169, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 174, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 174, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 178, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 178, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 187, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 187, "usage_type": "name"}, {"api_name": "sqlite3.IntegrityError", "line_number": 190, "usage_type": "name"}, {"api_name": "logs.db_logger.error", "line_number": 191, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 191, "usage_type": "name"}, {"api_name": "logs.db_logger.info", "line_number": 195, "usage_type": "call"}, {"api_name": "logs.db_logger", "line_number": 195, "usage_type": "name"}]} +{"seq_id": "60794309", "text": "import os\nimport sys\nfrom subprocess import Popen\n\nimport dvc.logger as logger\nfrom dvc.utils import is_binary, fix_env\n\n\nclass Daemon(object): # pragma: no cover\n def _spawn_windows(self, cmd):\n from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW\n\n CREATE_NEW_PROCESS_GROUP = 0x00000200\n DETACHED_PROCESS = 0x00000008\n creationflags = CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS\n\n startupinfo = STARTUPINFO()\n startupinfo.dwFlags |= STARTF_USESHOWWINDOW\n\n p = Popen(cmd,\n env=fix_env(),\n close_fds=True,\n shell=False,\n creationflags=creationflags,\n startupinfo=startupinfo)\n\n p.communicate()\n\n def _spawn_posix(self, cmd):\n # NOTE: using os._exit instead of sys.exit, because dvc built\n # with PyInstaller has trouble with SystemExit exeption and throws\n # errors such as \"[26338] Failed to execute script __main__\"\n try:\n pid = os.fork()\n if pid > 0:\n return\n except OSError:\n logger.error(\"failed at first fork\")\n os._exit(1)\n\n os.setsid()\n os.umask(0)\n\n try:\n pid = os.fork()\n if pid > 0:\n os._exit(0)\n except OSError:\n logger.error(\"failed at second fork\")\n os._exit(1)\n\n sys.stdin.close()\n sys.stdout.close()\n sys.stderr.close()\n\n p = Popen(cmd,\n env=fix_env(),\n close_fds=True,\n shell=False)\n\n p.communicate()\n\n os._exit(0)\n\n def __call__(self, args):\n cmd = [sys.executable]\n if not is_binary():\n cmd += ['-m', 'dvc']\n cmd += ['daemon', '-q'] + args\n\n logger.debug(\"Trying to spawn '{}'\".format(cmd))\n\n if os.name == 'nt':\n self._spawn_windows(cmd)\n elif os.name == 'posix':\n 
self._spawn_posix(cmd)\n else:\n raise NotImplementedError\n\n logger.debug(\"Spawned '{}'\".format(cmd))\n", "sub_path": "dvc/daemon.py", "file_name": "daemon.py", "file_ext": "py", "file_size_in_byte": 2114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "subprocess.STARTUPINFO", "line_number": 17, "usage_type": "call"}, {"api_name": "subprocess.STARTF_USESHOWWINDOW", "line_number": 18, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 20, "usage_type": "call"}, {"api_name": "dvc.utils.fix_env", "line_number": 21, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 34, "usage_type": "call"}, {"api_name": "dvc.logger.error", "line_number": 38, "usage_type": "call"}, {"api_name": "dvc.logger", "line_number": 38, "usage_type": "name"}, {"api_name": "os._exit", "line_number": 39, "usage_type": "call"}, {"api_name": "os.setsid", "line_number": 41, "usage_type": "call"}, {"api_name": "os.umask", "line_number": 42, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 45, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 47, "usage_type": "call"}, {"api_name": "dvc.logger.error", "line_number": 49, "usage_type": "call"}, {"api_name": "dvc.logger", "line_number": 49, "usage_type": "name"}, {"api_name": "os._exit", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stdin.close", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.stdout.close", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sys.stderr.close", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 54, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 56, "usage_type": "call"}, {"api_name": "dvc.utils.fix_env", "line_number": 57, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 66, "usage_type": "attribute"}, {"api_name": "dvc.utils.is_binary", "line_number": 67, "usage_type": "call"}, {"api_name": "dvc.logger.debug", "line_number": 71, "usage_type": "call"}, {"api_name": "dvc.logger", "line_number": 71, "usage_type": "name"}, {"api_name": "os.name", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 75, "usage_type": "attribute"}, {"api_name": "dvc.logger.debug", "line_number": 80, "usage_type": "call"}, {"api_name": "dvc.logger", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "614102009", "text": "import z3\nfrom z3 import If, Or, Extract, Concat, BitVecVal, FPVal, BitVecSort, RNE, RTZ, fpSignedToFP, fpToSBV, fpFPToFP, fpIsNaN, K, Select, Store, BV2Int\n\nimport subprocess\nimport sys\nimport traceback\nimport itertools\nimport json\n\nFloat = z3.FPSort(8, 24)\nDouble = z3.FPSort(11, 53)\n\nJAVA_HOME = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nJAVA = \"{}/bin/java\".format(JAVA_HOME)\nPATH_DATA_OUTPUT = \"pathConstraints.txt\"\n\nclass StackEntry:\n def __init__(self, branchId, done, isTrue):\n self.branchId = branchId\n self.done = done\n self.isTrue = isTrue\n\n def __repr__(self):\n return '({}, {}, {})'.format(self.branchId, self.done, self.isTrue)\n\nclass Variable:\n def __init__(self, varType, varName):\n self.varType = varType\n self.varName = varName\n\nclass Assignment:\n def __init__(self, leftOp, rightOp):\n self.leftOp = leftOp\n self.rightOp 
= rightOp\n\n def __repr__(self):\n return '{} = {}'.format(self.leftOp, self.rightOp)\n\nclass PathConstraint:\n def __init__(self, branchId, condition, wasTrue, assignmentIndex):\n self.branchId = branchId\n self.condition = condition\n self.wasTrue = wasTrue\n self.assignmentIndex = assignmentIndex\n\n def __repr__(self):\n return '({}, {}, {}, {})'.format(self.branchId, self.condition, self.wasTrue, self.assignmentIndex)\n\nclass PathData:\n def __init__(self, inputVariables, variables, inputAssignments, assignments, pathConstraints):\n self.inputVariables = inputVariables\n self.variables = variables\n self.inputAssignments = inputAssignments\n self.assignments = assignments\n self.pathConstraints = pathConstraints\n\n def inputRepr(self):\n res = []\n i = 0\n for assign in self.inputAssignments:\n assert assign.leftOp == 'INPUT{}'.format(i)\n res.append(assign.rightOp)\n i += 1\n return '[{}]'.format(', '.join(res))\n\nclass BVInput:\n def __init__(self, val):\n self.val = val\n\n def __repr__(self):\n return repr(self.val.as_signed_long())\n\n def inputStr(self):\n return str(self.val.as_signed_long())\n\nclass FPInput:\n def __init__(self, val):\n self.val = val\n\n def __repr__(self):\n s = repr(self.val)\n if s == '+oo':\n return 'inf'\n elif s == '-oo':\n return '-inf'\n else:\n return repr(eval(s))\n\n def inputStr(self):\n val = self.val\n sbits = val.sort().sbits()\n return str((val.exponent_as_long() << (sbits - 1)) | (val.significand_as_long()))\n\ndef readPathData():\n with open(PATH_DATA_OUTPUT, 'r') as f:\n s = f.read()\n lines = s.replace('$', '_').replace('<init>', '_init_').replace('<clinit>', '_clinit_').split('\\n')\n i = 0\n inputVariables = []\n while i < len(lines):\n if len(lines[i]) == 0:\n i += 1\n break\n varType, varName = lines[i].split(' ')\n inputVariables.append(Variable(varType, varName))\n i += 1\n variables = []\n while i < len(lines):\n if len(lines[i]) == 0:\n i += 1\n break\n varType, varName = lines[i].split(' ')\n variables.append(Variable(varType, varName))\n i += 1\n inputAssignments = []\n while i < len(lines):\n if len(lines[i]) == 0:\n i += 1\n break\n leftOp, rightOp = lines[i].split(' = ')\n inputAssignments.append(Assignment(leftOp, rightOp))\n i += 1\n assignments = []\n while i < len(lines):\n if len(lines[i]) == 0:\n i += 1\n break\n leftOp, rightOp = lines[i].split(' = ')\n assignments.append(Assignment(leftOp, rightOp))\n i += 1\n pathConstraints = []\n while i < len(lines):\n if len(lines[i]) == 0:\n i += 1\n break\n branchId, condition, wasTrue, assignmentIndex = lines[i].split('; ')\n pathConstraints.append(PathConstraint(int(branchId), condition, wasTrue == 'true', int(assignmentIndex)))\n i += 1\n return PathData(inputVariables, variables, inputAssignments, assignments, pathConstraints)\n\ndef makeZ3Var(v):\n t = v.varType\n name = v.varName\n if t.startswith('INSTANCE:'):\n s = t[9:]\n if s == 'BYTE':\n return z3.Array(name, BitVecSort(32), z3.BitVecSort(8))\n elif s == 'SHORT':\n return z3.Array(name, BitVecSort(32), z3.BitVecSort(16))\n elif s == 'INT':\n return z3.Array(name, BitVecSort(32), z3.BitVecSort(32))\n elif s == 'LONG':\n return z3.Array(name, BitVecSort(32), z3.BitVecSort(64))\n elif s == 'FLOAT':\n return z3.Array(name, BitVecSort(32), Float)\n elif s == 'DOUBLE':\n return z3.Array(name, BitVecSort(32), Double)\n elif s == 'CHAR':\n return z3.Array(name, BitVecSort(32), z3.BitVecSort(16))\n else:\n raise Exception(\"unsupported type {}\".format(t))\n elif t == 'BYTE':\n return z3.BitVec(name, 8)\n elif t == 'SHORT':\n return 
z3.BitVec(name, 16)\n elif t == 'INT':\n return z3.BitVec(name, 32)\n elif t == 'LONG':\n return z3.BitVec(name, 64)\n elif t == 'FLOAT':\n return z3.FP(name, Float)\n elif t == 'DOUBLE':\n return z3.FP(name, Double)\n elif t == 'CHAR':\n return z3.BitVec(name, 16)\n else:\n raise Exception(\"unsupported type {}\".format(t))\n\ndef solveForInputs(sfiStack, sfiPathData):\n z3.set_default_rounding_mode(RNE())\n\n # variables for arrays\n exec(\"BYTE_Arrays = z3.Array('BYTE_Arrays', z3.BitVecSort(32), z3.ArraySort(z3.BitVecSort(32), z3.BitVecSort(8)))\")\n exec(\"BYTE_ArrayLengths = z3.Array('BYTE_ArrayLengths', z3.BitVecSort(32), z3.BitVecSort(32))\")\n exec(\"SHORT_Arrays = z3.Array('SHORT_Arrays', z3.BitVecSort(32), z3.ArraySort(z3.BitVecSort(32), z3.BitVecSort(16)))\")\n exec(\"SHORT_ArrayLengths = z3.Array('SHORT_ArrayLengths', z3.BitVecSort(32), z3.BitVecSort(32))\")\n exec(\"CHAR_Arrays = z3.Array('CHAR_Arrays', z3.BitVecSort(32), z3.ArraySort(z3.BitVecSort(32), z3.BitVecSort(16)))\")\n exec(\"CHAR_ArrayLengths = z3.Array('CHAR_ArrayLengths', z3.BitVecSort(32), z3.BitVecSort(32))\")\n exec(\"INT_Arrays = z3.Array('INT_Arrays', z3.BitVecSort(32), z3.ArraySort(z3.BitVecSort(32), z3.BitVecSort(32)))\")\n exec(\"INT_ArrayLengths = z3.Array('INT_ArrayLengths', z3.BitVecSort(32), z3.BitVecSort(32))\")\n exec(\"LONG_Arrays = z3.Array('LONG_Arrays', z3.BitVecSort(32), z3.ArraySort(z3.BitVecSort(32), z3.BitVecSort(64)))\")\n exec(\"LONG_ArrayLengths = z3.Array('LONG_ArrayLengths', z3.BitVecSort(32), z3.BitVecSort(32))\")\n exec(\"FLOAT_Arrays = z3.Array('FLOAT_Arrays', z3.BitVecSort(32), z3.ArraySort(z3.BitVecSort(32), Float))\")\n exec(\"FLOAT_ArrayLengths = z3.Array('FLOAT_ArrayLengths', z3.BitVecSort(32), z3.BitVecSort(32))\")\n exec(\"DOUBLE_Arrays = z3.Array('DOUBLE_Arrays', z3.BitVecSort(32), z3.ArraySort(z3.BitVecSort(32), Double))\")\n exec(\"DOUBLE_ArrayLengths = z3.Array('DOUBLE_ArrayLengths', z3.BitVecSort(32), z3.BitVecSort(32))\")\n\n sfiInputVars = {}\n for sfiV in itertools.chain(sfiPathData.inputVariables, sfiPathData.variables):\n sfiVar = makeZ3Var(sfiV)\n exec('{} = sfiVar'.format(sfiV.varName))\n if sfiV.varName.startswith('INPUT'):\n sfiInputVars[sfiV.varName] = sfiVar\n\n sfiSolver = z3.Solver()\n\n sfiPathIndex = 0\n sfiStop = False\n for sfiAssignIndex in range(len(sfiPathData.assignments)):\n sfiPc = sfiPathData.pathConstraints[sfiPathIndex]\n while sfiPc.assignmentIndex == sfiAssignIndex:\n try:\n sfiCond = eval(sfiPathData.pathConstraints[sfiPathIndex].condition)\n except:\n sys.stderr.write('Error when evaluating condition {}\\n'.format(sfiPathData.pathConstraints[sfiPathIndex].condition))\n raise\n if not sfiStack[sfiPathIndex].isTrue:\n sfiCond = z3.Not(sfiCond)\n sfiSolver.add(sfiCond)\n sfiPathIndex += 1\n if sfiPathIndex >= len(sfiStack):\n sfiStop = True\n break\n sfiPc = sfiPathData.pathConstraints[sfiPathIndex]\n if sfiStop:\n break\n sfiAssign = sfiPathData.assignments[sfiAssignIndex]\n try:\n exec('{} = {}'.format(sfiAssign.leftOp, sfiAssign.rightOp))\n except:\n sys.stderr.write('Error when performing assignment {}: {} = {}\\n'.format(sfiAssignIndex, sfiAssign.leftOp, sfiAssign.rightOp))\n raise\n\n while sfiPathIndex < len(sfiStack):\n sfiPc = sfiPathData.pathConstraints[sfiPathIndex]\n assert sfiPc.assignmentIndex == len(sfiPathData.assignments)\n try:\n sfiCond = eval(sfiPathData.pathConstraints[sfiPathIndex].condition)\n except:\n sys.stderr.write('Error when evaluating condition {}\\n'.format(sfiPathData.pathConstraints[sfiPathIndex].condition))\n 
raise\n if not sfiStack[sfiPathIndex].isTrue:\n sfiCond = z3.Not(sfiCond)\n sfiSolver.add(sfiCond)\n sfiPathIndex += 1\n\n if sfiSolver.check() == z3.sat:\n m = sfiSolver.model()\n if verbose:\n print('Solved {}'.format(sfiSolver))\n return m, sfiInputVars\n else:\n return None, None\n\ndef modelValueToInput(val):\n if val is None:\n return None\n elif type(val) is z3.BitVecNumRef:\n return BVInput(val)\n elif type(val) is z3.FPNumRef:\n return FPInput(val)\n else:\n raise Exception('unsupported model value {} (type {})'.format(val, type(val)))\n\n# returns True if the program crashed, otherwise False\ndef runInstrumentedProgram(inputs, runCommand):\n env = {\n \"JAVA_CONCOLIC_OUTPUT\": PATH_DATA_OUTPUT\n }\n for i in range(len(inputs)):\n if inputs[i] is not None:\n env[\"JAVA_CONCOLIC_INPUT{}\".format(i)] = inputs[i].inputStr()\n r = subprocess.run(runCommand, shell=True, env=env, capture_output=True)\n if len(r.stdout) > 0:\n print(r.stdout.decode('utf-8'))\n if len(r.stderr) > 0:\n print(r.stderr.decode('utf-8'))\n return r.returncode != 0\n\n# load config\nwith open(sys.argv[1], 'r') as f:\n cfg = json.loads(f.read())\n runCommand = cfg['runCommand']\n stopOnError = cfg['stopOnError']\n verbose = cfg['verbose']\n\n# depth-first exploration of program paths\nstack = [] \ninputs = []\nwhile True:\n if inputs is not None:\n foundError = runInstrumentedProgram(inputs, runCommand)\n pathData = readPathData()\n if verbose:\n print('Input {}, Path {}, Crashed? {}'.format(pathData.inputRepr(), list(map(lambda e: e.isTrue, stack)), 'Yes' if foundError else 'No'))\n else:\n print('Input {}, Crashed? {}'.format(pathData.inputRepr(), 'Yes' if foundError else 'No'))\n if foundError and stopOnError:\n break\n for i in range(len(pathData.pathConstraints)):\n pc = pathData.pathConstraints[i]\n if i >= len(stack):\n stack.append(StackEntry(pc.branchId, False, pc.wasTrue))\n else:\n entry = stack[i]\n if pc.branchId != entry.branchId or pc.wasTrue != entry.isTrue:\n raise Exception(\"program execution did not proceed as expected:\\n expected: {}\\n actual: {}\".format(\n list(map(lambda e: e.isTrue, stack)), \n list(map(lambda pc: pc.wasTrue, pathData.pathConstraints))))\n while len(stack) > 0 and stack[-1].done:\n stack.pop()\n if len(stack) == 0:\n print('Done!')\n break # done\n last = stack[-1]\n last.isTrue = not last.isTrue\n last.done = True\n model, inputVars = solveForInputs(stack, pathData)\n if model is None:\n # infeasible path; continue search without executing program\n inputs = None\n else:\n inputs = []\n for i in range(len(inputVars)):\n inputVar = inputVars['INPUT{}'.format(i)]\n inputs.append(modelValueToInput(model[inputVar]))\n", "sub_path": "concolic/concolic.py", "file_name": "concolic.py", "file_ext": "py", "file_size_in_byte": 12285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "z3.FPSort", "line_number": 10, "usage_type": "call"}, {"api_name": "z3.FPSort", "line_number": 11, "usage_type": "call"}, {"api_name": "z3.Array", "line_number": 147, "usage_type": "call"}, {"api_name": "z3.BitVecSort", "line_number": 147, "usage_type": "call"}, {"api_name": "z3.Array", "line_number": 149, "usage_type": "call"}, {"api_name": "z3.BitVecSort", "line_number": 149, "usage_type": "call"}, {"api_name": "z3.Array", "line_number": 151, "usage_type": "call"}, {"api_name": "z3.BitVecSort", "line_number": 151, "usage_type": "call"}, {"api_name": "z3.Array", "line_number": 153, "usage_type": "call"}, {"api_name": 
"z3.BitVecSort", "line_number": 153, "usage_type": "call"}, {"api_name": "z3.Array", "line_number": 155, "usage_type": "call"}, {"api_name": "z3.BitVecSort", "line_number": 155, "usage_type": "call"}, {"api_name": "z3.Array", "line_number": 157, "usage_type": "call"}, {"api_name": "z3.BitVecSort", "line_number": 157, "usage_type": "call"}, {"api_name": "z3.Array", "line_number": 159, "usage_type": "call"}, {"api_name": "z3.BitVecSort", "line_number": 159, "usage_type": "call"}, {"api_name": "z3.BitVec", "line_number": 163, "usage_type": "call"}, {"api_name": "z3.BitVec", "line_number": 165, "usage_type": "call"}, {"api_name": "z3.BitVec", "line_number": 167, "usage_type": "call"}, {"api_name": "z3.BitVec", "line_number": 169, "usage_type": "call"}, {"api_name": "z3.FP", "line_number": 171, "usage_type": "call"}, {"api_name": "z3.FP", "line_number": 173, "usage_type": "call"}, {"api_name": "z3.BitVec", "line_number": 175, "usage_type": "call"}, {"api_name": "z3.set_default_rounding_mode", "line_number": 180, "usage_type": "call"}, {"api_name": "z3.RNE", "line_number": 180, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 199, "usage_type": "call"}, {"api_name": "z3.Solver", "line_number": 205, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 215, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 215, "usage_type": "attribute"}, {"api_name": "z3.Not", "line_number": 218, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 231, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 231, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 240, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 240, "usage_type": "attribute"}, {"api_name": "z3.Not", "line_number": 243, "usage_type": "call"}, {"api_name": "z3.sat", "line_number": 247, "usage_type": "attribute"}, {"api_name": "z3.BitVecNumRef", "line_number": 258, "usage_type": "attribute"}, {"api_name": "z3.FPNumRef", "line_number": 260, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 273, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 281, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 282, "usage_type": "call"}]} +{"seq_id": "576500459", "text": "import ctypes\nimport logging\nimport re\nfrom snap7.types import S7Object, longword, SrvEvent, server_statuses, cpu_statuses\nfrom snap7.error import check_error\nfrom snap7.common import load_lib\n\nlogger = logging.getLogger(__name__)\n\nclib = load_lib()\n\nipv4 = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\"\n\nCALLBACK = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p,\n ctypes.POINTER(SrvEvent), ctypes.c_uint)\n\n\ndef error_wrap(func):\n \"\"\"Parses a s7 error code returned the decorated function.\"\"\"\n def f(*args, **kw):\n code = func(*args, **kw)\n check_error(code, client=True)\n return f\n\n\ndef event_text(event):\n \"\"\"Returns a textual explanation of a given event object\n\n :param event: an PSrvEvent struct object\n :returns: the error string\n \"\"\"\n logger.debug(\"error text for %s\" % hex(event.EvtCode))\n len_ = 1024\n text_type = ctypes.c_char * len_\n text = text_type()\n error = clib.Srv_EventText(ctypes.byref(event), ctypes.byref(text), len_)\n check_error(error)\n return text.value\n\n\nclass Server(object):\n def __init__(self):\n logger.info(\"creating server\")\n self.pointer = S7Object(clib.Srv_Create())\n 
#self._set_log_callback()\n\n @error_wrap\n def register_area(self, area_code, index, userdata):\n \"\"\"Shares a memory area with the server. That memory block will be\n visible by the clients.\n \"\"\"\n size = ctypes.sizeof(userdata)\n logger.info(\"registering area %s, index %s, size %s\" % (area_code,\n index, size))\n size = ctypes.sizeof(userdata)\n return clib.Srv_RegisterArea(self.pointer, area_code, index,\n ctypes.byref(userdata), size)\n\n @error_wrap\n def set_events_callback(self, call_back):\n \"\"\"Sets the user callback that the Server object has to call when an\n event is created.\n \"\"\"\n logger.info(\"setting event callback\")\n raise NotImplementedError\n def wrap_callback(usrptr, pevent, size):\n \"\"\" Wraps python function into a ctypes function\n :param usrptr: not used\n :param pevent: a snap7 event struct\n :param size:\n :returns: should return an int\n \"\"\"\n # TODO: call the actual callback function. Somehow we can't access\n # objects in the scope of this object...\n logger.info(\"callback event: \" + event_text(pevent.contents))\n return 0\n\n return clib.Srv_SetEventsCallback(self.pointer, CALLBACK(wrap_callback))\n\n @error_wrap\n def _set_log_callback(self):\n \"\"\"Sets a callback that logs the events\n \"\"\"\n logger.debug(\"setting up event logger\")\n raise NotImplementedError\n def wrap_callback(usrptr, pevent, size):\n logger.info(\"callback event: \" + event_text(pevent.contents))\n return 0\n return clib.Srv_SetEventsCallback(self.pointer, CALLBACK(wrap_callback))\n\n @error_wrap\n def start(self):\n logger.info(\"starting server on 0.0.0.0:102\")\n return clib.Srv_Start(self.pointer)\n\n @error_wrap\n def stop(self):\n logger.info(\"stopping server\")\n return clib.Srv_Stop(self.pointer)\n\n @error_wrap\n def destroy(self):\n logger.info(\"destroying server\")\n return clib.Srv_Destroy(ctypes.byref(self.pointer))\n\n def get_status(self):\n \"\"\"Reads the server status, the Virtual CPU status and the number of\n the clients connected.\n\n :returns: server status, cpu status, client count\n \"\"\"\n logger.debug(\"get server status\")\n server_status = ctypes.c_int()\n cpu_status = ctypes.c_int()\n clients_count = ctypes.c_int()\n error = (clib.Srv_GetStatus(self.pointer, ctypes.byref(server_status),\n ctypes.byref(cpu_status),\n ctypes.byref(clients_count)))\n check_error(error)\n logger.debug(\"status server %s cpu %s clients %s\" % (server_status.value,\n cpu_status.value, clients_count.value))\n return server_statuses[server_status.value],\\\n cpu_statuses[cpu_status.value],\\\n clients_count.value\n\n @error_wrap\n def unregister_area(self, area_code, index):\n \"\"\"'Unshares' a memory area previously shared with Srv_RegisterArea().\n That memory block will be no longer visible by the clients.\n \"\"\"\n return clib.Srv_UnregisterArea(self.pointer, area_code, index)\n\n @error_wrap\n def unlock_area(self, code, index):\n \"\"\"Unlocks a previously locked shared memory area.\n \"\"\"\n logging.debug(\"unlocking area code %s index %s\" % (code, index))\n return clib.Srv_UnlockArea(self.pointer, code, index)\n\n @error_wrap\n def lock_area(self, code, index):\n \"\"\"Locks a shared memory area.\n \"\"\"\n logging.debug(\"locking area code %s index %s\" % (code, index))\n return clib.Srv_LockArea(self.pointer, code, index)\n\n @error_wrap\n def start_to(self, ip):\n assert re.match(ipv4, ip), '%s is invalid ipv4' % ip\n logger.info(\"starting server to %s:102\" % ip)\n return clib.Srv_Start(self.pointer, ip)\n\n @error_wrap\n def 
set_param(self, number, value):\n \"\"\"Sets an internal Server object parameter.\n \"\"\"\n logger.debug(\"setting param number %s to %s\" % (number, value))\n return clib.Srv_SetParam(self.pointer, number,\n ctypes.byref(ctypes.c_int(value)))\n\n @error_wrap\n def set_mask(self, kind, mask):\n \"\"\"Writes the specified filter mask.\n \"\"\"\n logger.debug(\"setting mask kind %s to %s\" % (kind, mask))\n return clib.Srv_SetMask(self.pointer, kind, mask)\n\n @error_wrap\n def set_cpu_status(self, status):\n \"\"\"Sets the Virtual CPU status.\n \"\"\"\n assert status in cpu_statuses, 'unknown cpu state %s' % status\n logger.debug(\"setting cpu status to %s\" % status)\n return clib.Srv_SetCpuStatus(self.pointer, status)\n\n def pick_event(self):\n \"\"\"Extracts an event (if available) from the Events queue.\n \"\"\"\n logger.debug(\"checking event queue\")\n event = SrvEvent()\n ready = ctypes.c_int32()\n code = clib.Srv_PickEvent(self.pointer, ctypes.byref(event),\n ctypes.byref(ready))\n check_error(code)\n if ready:\n logger.debug(\"one event ready: %s\" % event)\n return event\n logger.debug(\"no events ready\")\n\n def get_param(self, number):\n \"\"\"Reads an internal Server object parameter.\n\n int Srv_GetParam(S7Object Server, int ParamNumber, void *pValue);\n \"\"\"\n logger.debug(\"retrieving param number %s\" % number)\n value = ctypes.c_int()\n code = clib.Srv_GetParam(self.pointer, number, ctypes.byref(value))\n check_error(code)\n return value.value\n\n def get_mask(self, kind):\n \"\"\"Reads the specified filter mask.\n \"\"\"\n logger.debug(\"retrieving mask kind %s\" % kind)\n mask = longword()\n code = clib.Srv_GetMask(self.pointer, kind, ctypes.byref(mask))\n check_error(code)\n return mask\n\n @error_wrap\n def clear_events(self):\n \"\"\"Empties the Event queue.\n \"\"\"\n logger.debug(\"clearing event queue\")\n return clib.Srv_ClearEvents(self.pointer)\n", "sub_path": "snap7/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 7608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "snap7.common.load_lib", "line_number": 10, "usage_type": "call"}, {"api_name": "ctypes.CFUNCTYPE", "line_number": 14, "usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 14, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 14, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 15, "usage_type": "call"}, {"api_name": "snap7.types.SrvEvent", "line_number": 15, "usage_type": "argument"}, {"api_name": "ctypes.c_uint", "line_number": 15, "usage_type": "attribute"}, {"api_name": "snap7.error.check_error", "line_number": 22, "usage_type": "call"}, {"api_name": "ctypes.c_char", "line_number": 34, "usage_type": "attribute"}, {"api_name": "ctypes.byref", "line_number": 36, "usage_type": "call"}, {"api_name": "snap7.error.check_error", "line_number": 37, "usage_type": "call"}, {"api_name": "snap7.types.S7Object", "line_number": 44, "usage_type": "call"}, {"api_name": "ctypes.sizeof", "line_number": 52, "usage_type": "call"}, {"api_name": "ctypes.sizeof", "line_number": 55, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 57, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 104, "usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 113, "usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 114, 
"usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 115, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 116, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 117, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 118, "usage_type": "call"}, {"api_name": "snap7.error.check_error", "line_number": 119, "usage_type": "call"}, {"api_name": "snap7.types.server_statuses", "line_number": 122, "usage_type": "name"}, {"api_name": "snap7.types.cpu_statuses", "line_number": 123, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 144, "usage_type": "call"}, {"api_name": "re.match", "line_number": 149, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 159, "usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 159, "usage_type": "call"}, {"api_name": "snap7.types.cpu_statuses", "line_number": 172, "usage_type": "name"}, {"api_name": "snap7.types.SrvEvent", "line_number": 180, "usage_type": "call"}, {"api_name": "ctypes.c_int32", "line_number": 181, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 182, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 183, "usage_type": "call"}, {"api_name": "snap7.error.check_error", "line_number": 184, "usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 196, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 197, "usage_type": "call"}, {"api_name": "snap7.error.check_error", "line_number": 198, "usage_type": "call"}, {"api_name": "snap7.types.longword", "line_number": 205, "usage_type": "call"}, {"api_name": "ctypes.byref", "line_number": 206, "usage_type": "call"}, {"api_name": "snap7.error.check_error", "line_number": 207, "usage_type": "call"}]} +{"seq_id": "72395604", "text": "import datetime\nimport random\n\nfrom django.shortcuts import render, redirect, resolve_url, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import FormView, TemplateView\nfrom django.views import View\nfrom django.db import transaction\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib.staticfiles import finders\nfrom django.core.mail import EmailMessage, send_mail\nfrom django.forms import formset_factory\nfrom django.template.loader import render_to_string\n\nfrom .forms import RegisterForm, LoginForm, ComroomAdminForm, PasswordResetForm, GetAdminForm, LoginForm_multi\nfrom .models import School, AdminUser, Notice, Comroom, IPs\n\nfrom .multiforms import MultiFormsView\n# Create your views here.\n\n\ndef privacy_agree(request):\n template_name = 'privacy.html'\n\n file_path = finders.find('com_privacy.txt')\n searched_location = finders.searched_locations\n f = open(file_path, 'r')\n data = f.read()\n f.close()\n\n return render(request, template_name, {'privacy': data})\n\n\ndef agree_pirv(request):\n request.session['privacy'] = True\n return redirect('/school/register')\n\n\nclass RegisterView(FormView):\n template_name = 'register.html'\n form_class = RegisterForm\n success_url = '/'\n\n def get(self, request, *args, **kwargs):\n # 정상적인 방법(개인정보동의)으로 접근했는지 판단\n try:\n privacy = request.session['privacy']\n except:\n print(\"No privacy session\")\n return redirect('/')\n else:\n if not privacy:\n return redirect('/')\n return super().get(self, request, args, kwargs)\n\n def form_valid(self, form):\n with 
transaction.atomic():\n\n school = School(\n province=form.cleaned_data.get('province'),\n name=form.cleaned_data.get('name')+'초등학교',\n ea=form.cleaned_data.get('ea'),\n s_code=random.randint(1000, 9999)\n )\n school.save()\n adminUser = AdminUser(\n school=school,\n user=form.cleaned_data.get('user'),\n password=make_password(form.cleaned_data.get('password')),\n realname=form.cleaned_data.get('realname'),\n email=form.cleaned_data.get('email'),\n auth_key=randstr(50),\n is_active=False\n )\n self.request.session['username'] = adminUser.realname\n self.request.session['user_id'] = adminUser.user\n adminUser.save()\n for i in range(int(form.cleaned_data.get('ea'))):\n comroom = Comroom(\n school=school,\n roomNo=i+1,\n name=f\"컴{i+1}실\",\n caption='위치, 교실 이용방법, 이용시 주의사항 등'\n )\n comroom.save()\n mail_title = \"컴룸닷컴 가입 인증메일\"\n mail_args = {'name': adminUser.realname,\n 'mail_link': adminUser.auth_key}\n mail_context = \"컴룸닷컴 가입 인증메일\"\n mail_html = render_to_string('mail_template.html', mail_args)\n send_mail(mail_title, mail_context, 'ssamko@kakao.com',\n [adminUser.email], html_message=mail_html)\n message = f\"{adminUser.realname} 선생님께서 입력하신 메일({adminUser.email})로 인증 링크를 발송했습니다. \\\n 이메일 인증을 하는 이유\"\n\n # privacy 세션 초기화\n self.request.session['privacy'] = False\n return render(self.request, 'notice.html', {'message': message})\n # return super().form_valid(form)\n\n\nclass LoginView(FormView):\n template_name = 'login.html'\n form_class = LoginForm\n success_url = '/'\n\n def form_valid(self, form):\n user = form.data.get('user')\n user = AdminUser.objects.get(\n user=user)\n self.request.session['username'] = user.realname\n self.request.session['user_id'] = user.user\n self.request.session['school'] = user.school.id\n\n return super().form_valid(form)\n\n\nclass MultipleFormsLoginView(MultiFormsView):\n template_name = \"login.html\"\n form_classes = {'login': LoginForm_multi,\n 'get_admin': GetAdminForm,\n }\n\n success_urls = {\n 'login': reverse_lazy('index'),\n 'get_admin': reverse_lazy('send_password_mail'),\n }\n\n def login_form_valid(self, form):\n user = form.cleaned_data.get('user')\n password = form.cleaned_data.get('password')\n form_name = form.cleaned_data.get('action')\n print(user)\n user = AdminUser.objects.get(\n user=user)\n self.request.session['username'] = user.realname\n self.request.session['user_id'] = user.user\n self.request.session['school'] = user.school.id\n return HttpResponseRedirect(self.get_success_url(form_name))\n\n def get_admin_form_valid(self, form):\n print('form valid')\n email = form.cleaned_data.get('email')\n teacher_name = form.cleaned_data.get('teacher_name')\n adminUser = AdminUser.objects.get(\n email=email, realname=teacher_name)\n form_name = form.cleaned_data.get('action')\n self.request.session['adminUser_pk'] = adminUser.pk\n print(adminUser)\n\n return HttpResponseRedirect(self.get_success_url(form_name))\n\n\ndef ex_login(request):\n request.session['username'] = '박새로이'\n request.session['user_id'] = 'icic'\n request.session['school'] = AdminUser.objects.get(user='icic').school.id\n return redirect('/')\n\n\ndef logout(request):\n if 'username' in request.session:\n del(request.session['username'])\n del(request.session['user_id'])\n\n return redirect('/')\n\n\ndef ip_getter(request):\n try:\n ip_addr = request.META['REMOTE_ADDR']\n except:\n print('localhost')\n else:\n\n try:\n this_ip = IPs.objects.get(ip=ip_addr)\n except:\n new_ip = IPs(ip=ip_addr, )\n if request.session['school']:\n new_ip.school = School.objects.get(\n 
id=request.session['school'])\n new_ip.save()\n else:\n\n this_ip.ip_count += 1\n this_ip.save()\n\n\ndef index(request):\n # ip_getter(request)\n context = {}\n\n username = request.session.get('username')\n user_id = request.session.get('user_id')\n\n # get notice objects\n notices = Notice.objects.filter(isshow=True)\n context['notices'] = notices\n\n if username:\n try:\n adminUser = AdminUser.objects.get(\n realname=username, user=user_id)\n school = adminUser.school\n context['username'] = username\n context['school'] = school.name\n context['s_code'] = school.s_code\n context['is_active'] = adminUser.is_active\n request.session['school_info'] = school.id\n except:\n return redirect('/')\n\n elif 's_code' in request.session:\n context['s_code'] = request.session['s_code']\n\n return render(request, 'index.html', context)\n\n\nclass AboutView(TemplateView):\n template_name = \"about.html\"\n\n\nclass ComroomAdminView(View):\n template_name = 'comroom_admin.html'\n success_url = '/'\n context = {}\n\n def get(self, request, *args, **kwargs):\n context = self.context\n forms = []\n try:\n school = School.objects.get(id=request.session['school_info'])\n except:\n return redirect('/')\n\n for i in range(school.comroom_set.count()):\n room = school.comroom_set.get(roomNo=i+1)\n # initial form by getting data from DB\n comform = ComroomAdminForm(initial={'room_name': room.name,\n 'room_caption': room.caption,\n })\n #form_name = 'form'+str(i+1)\n\n forms.append(comform)\n\n context['forms'] = forms\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n context = self.context\n forms = []\n\n school = School.objects.get(id=request.session['school_info'])\n\n for i in range(school.comroom_set.count()):\n\n # bind form with instance\n form = school.comroom_set.get(roomNo=i+1)\n print(form)\n print(request.POST)\n form.name = request.POST['room_name'+str(i+1)]\n form.caption = request.POST['room_caption'+str(i+1)]\n form.save()\n\n return redirect('/')\n\n def form_valid(self, form):\n return super().form_valid(form)\n\n# 새로 생긴 컴퓨터룸 table에 학교별 데이터 생성하기 위한 함수\n\n\ndef make_room(request):\n schools = School.objects.all()\n for school in schools:\n for room in range(school.ea):\n\n a = Comroom(school=school,\n name=f'컴{room+1}실',\n caption='위치, 이용안내 등',\n roomNo=room+1)\n # 이미 존재하는지 검사\n if not Comroom.objects.filter(school=school,\n roomNo=room+1).exists():\n a.save()\n\n return redirect('/')\n\n\ndef time_admin(request):\n template_name = \"time_admin.html\"\n context = {}\n times = []\n\n school = School.objects.get(id=request.session['school_info'])\n timetables = school.timetable_set.all().order_by('-date')\n\n for i in range(timetables.count()):\n times.append(timetables[i])\n\n context['times'] = times\n\n return render(request, template_name, context)\n # return redirect('/')\n\n\ndef del_time(request, **kwargs):\n\n school = School.objects.get(id=request.session['school_info'])\n timetables = school.timetable_set.all().order_by('-date')\n timetables[kwargs['i']].delete()\n\n return redirect('/time_admin/')\n\n# token init. 
method\n\n\ndef randstr(length):\n rstr = \"0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ\"\n rstr_len = len(rstr) - 1\n result = \"\"\n for i in range(length):\n result += rstr[random.randint(0, rstr_len)]\n return result\n\n\ndef user_active(request, token):\n adminUser = get_object_or_404(AdminUser, auth_key=token)\n if adminUser.reg_date < datetime.datetime.now() - datetime.timedelta(hours=3):\n adminUser.school.delete()\n message = \"만료된 링크입니다. 다시 가입을 신청하세요\"\n else:\n adminUser.is_active = True\n adminUser.auth_key = ''\n adminUser.save()\n message = \"인증되었습니다. 불편한 사항은 언제든 말씀해주세요 ^^\"\n return render(request, 'notice.html', {'message': message})\n\n\ndef reset_password(request, token):\n adminUser = get_object_or_404(AdminUser, auth_key=token)\n\n if request.method == 'GET':\n reset_form = PasswordResetForm()\n return render(request, \"reset_password.html\", {'teacher_name': adminUser.realname,\n 'form': reset_form})\n else:\n reset_form = PasswordResetForm(request.POST)\n if reset_form.is_valid():\n adminUser.password = make_password(\n reset_form.cleaned_data.get('password'))\n adminUser.auth_key = ''\n adminUser.save()\n request.session['user_id'] = adminUser.user\n request.session['username'] = adminUser.realname\n request.session['school'] = adminUser.school.id\n return redirect('/')\n\n return render(request, \"reset_password.html\", {'teacher_name': adminUser.realname})\n\n\ndef send_password_mail(request):\n adminUser_pk = request.session['adminUser_pk']\n adminUser = AdminUser.objects.get(pk=adminUser_pk)\n\n while True:\n auth_key = randstr(50)\n if not AdminUser.objects.filter(auth_key=auth_key):\n break\n\n adminUser.auth_key = auth_key\n adminUser.save()\n\n mail_title = \"컴룸닷컴 비밀번호 재설정\"\n mail_args = {'teacher_name': adminUser.realname,\n 'token': adminUser.auth_key}\n mail_context = \"컴룸닷컴 비밀번호 재설정\"\n mail_html = render_to_string('password_mail.html', mail_args)\n send_mail(mail_title, mail_context, 'ssamko@kakao.com',\n [adminUser.email], html_message=mail_html)\n message = f\"{adminUser.realname} 선생님께서 입력하신 메일({adminUser.email})로\\\n 비밀번호 재설정 메일을 보내드렸습니다. 
8시간 안에 재설정해주세요.\"\n return render(request, \"notice.html\", {'message': message})\n", "sub_path": "school/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12901, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.contrib.staticfiles.finders.find", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.staticfiles.finders", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.staticfiles.finders.searched_locations", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.staticfiles.finders", "line_number": 27, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "django.views.generic.FormView", "line_number": 40, "usage_type": "name"}, {"api_name": "forms.RegisterForm", "line_number": 42, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 58, "usage_type": "name"}, {"api_name": "models.School", "line_number": 60, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "models.AdminUser", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 70, "usage_type": "call"}, {"api_name": "models.Comroom", "line_number": 80, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 91, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}, {"api_name": "django.views.generic.FormView", "line_number": 103, "usage_type": "name"}, {"api_name": "forms.LoginForm", "line_number": 105, "usage_type": "name"}, {"api_name": "models.AdminUser.objects.get", "line_number": 110, "usage_type": "call"}, {"api_name": "models.AdminUser.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.AdminUser", "line_number": 110, "usage_type": "name"}, {"api_name": "multiforms.MultiFormsView", "line_number": 119, "usage_type": "name"}, {"api_name": "forms.LoginForm_multi", "line_number": 121, "usage_type": "name"}, {"api_name": "forms.GetAdminForm", "line_number": 122, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 126, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 127, "usage_type": "call"}, {"api_name": "models.AdminUser.objects.get", "line_number": 135, "usage_type": "call"}, {"api_name": "models.AdminUser.objects", "line_number": 135, "usage_type": "attribute"}, {"api_name": "models.AdminUser", "line_number": 135, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 140, "usage_type": "call"}, {"api_name": "models.AdminUser.objects.get", "line_number": 146, "usage_type": "call"}, {"api_name": "models.AdminUser.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.AdminUser", "line_number": 146, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 152, "usage_type": "call"}, {"api_name": 
"models.AdminUser.objects.get", "line_number": 158, "usage_type": "call"}, {"api_name": "models.AdminUser.objects", "line_number": 158, "usage_type": "attribute"}, {"api_name": "models.AdminUser", "line_number": 158, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 159, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 167, "usage_type": "call"}, {"api_name": "models.IPs.objects.get", "line_number": 178, "usage_type": "call"}, {"api_name": "models.IPs.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "models.IPs", "line_number": 178, "usage_type": "name"}, {"api_name": "models.IPs", "line_number": 180, "usage_type": "call"}, {"api_name": "models.School.objects.get", "line_number": 182, "usage_type": "call"}, {"api_name": "models.School.objects", "line_number": 182, "usage_type": "attribute"}, {"api_name": "models.School", "line_number": 182, "usage_type": "name"}, {"api_name": "models.Notice.objects.filter", "line_number": 199, "usage_type": "call"}, {"api_name": "models.Notice.objects", "line_number": 199, "usage_type": "attribute"}, {"api_name": "models.Notice", "line_number": 199, "usage_type": "name"}, {"api_name": "models.AdminUser.objects.get", "line_number": 204, "usage_type": "call"}, {"api_name": "models.AdminUser.objects", "line_number": 204, "usage_type": "attribute"}, {"api_name": "models.AdminUser", "line_number": 204, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 213, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 218, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 221, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 225, "usage_type": "name"}, {"api_name": "models.School.objects.get", "line_number": 234, "usage_type": "call"}, {"api_name": "models.School.objects", "line_number": 234, "usage_type": "attribute"}, {"api_name": "models.School", "line_number": 234, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 236, "usage_type": "call"}, {"api_name": "forms.ComroomAdminForm", "line_number": 241, "usage_type": "call"}, {"api_name": "forms.append", "line_number": 246, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 250, "usage_type": "call"}, {"api_name": "models.School.objects.get", "line_number": 256, "usage_type": "call"}, {"api_name": "models.School.objects", "line_number": 256, "usage_type": "attribute"}, {"api_name": "models.School", "line_number": 256, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 268, "usage_type": "call"}, {"api_name": "models.School.objects.all", "line_number": 277, "usage_type": "call"}, {"api_name": "models.School.objects", "line_number": 277, "usage_type": "attribute"}, {"api_name": "models.School", "line_number": 277, "usage_type": "name"}, {"api_name": "models.Comroom", "line_number": 281, "usage_type": "call"}, {"api_name": "models.Comroom.objects.filter", "line_number": 286, "usage_type": "call"}, {"api_name": "models.Comroom.objects", "line_number": 286, "usage_type": "attribute"}, {"api_name": "models.Comroom", "line_number": 286, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 290, "usage_type": "call"}, {"api_name": "models.School.objects.get", "line_number": 298, "usage_type": "call"}, {"api_name": "models.School.objects", "line_number": 298, "usage_type": "attribute"}, {"api_name": "models.School", 
"line_number": 298, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 306, "usage_type": "call"}, {"api_name": "models.School.objects.get", "line_number": 312, "usage_type": "call"}, {"api_name": "models.School.objects", "line_number": 312, "usage_type": "attribute"}, {"api_name": "models.School", "line_number": 312, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 316, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 326, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 331, "usage_type": "call"}, {"api_name": "models.AdminUser", "line_number": 331, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 332, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 332, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 332, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 340, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 344, "usage_type": "call"}, {"api_name": "models.AdminUser", "line_number": 344, "usage_type": "argument"}, {"api_name": "forms.PasswordResetForm", "line_number": 347, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 348, "usage_type": "call"}, {"api_name": "forms.PasswordResetForm", "line_number": 351, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 353, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 360, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 362, "usage_type": "call"}, {"api_name": "models.AdminUser.objects.get", "line_number": 367, "usage_type": "call"}, {"api_name": "models.AdminUser.objects", "line_number": 367, "usage_type": "attribute"}, {"api_name": "models.AdminUser", "line_number": 367, "usage_type": "name"}, {"api_name": "models.AdminUser.objects.filter", "line_number": 371, "usage_type": "call"}, {"api_name": "models.AdminUser.objects", "line_number": 371, "usage_type": "attribute"}, {"api_name": "models.AdminUser", "line_number": 371, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 381, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 382, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 386, "usage_type": "call"}]} +{"seq_id": "18606391", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/12 11:58\n# @Author : Aries\n# @Site :\n# @File : locust_demo1.py\n# @Software: PyCharm\nimport random\nfrom locust import HttpUser, TaskSet, task\nfrom locust.clients import HttpSession\n# import pysnooper\nimport base64\nimport json\nimport time\nimport hashlib\nimport queue\nimport hmac\nimport os\nimport requests\n\ndef bs64_en(data):\n bytes_data = json.dumps(data).encode(\"utf-8\")\n result_data = str(base64.b64encode(bytes_data),\"utf-8\")\n return result_data\n\ndef bs64_de(data):\n temp = str(base64.b64decode(data),\"utf-8\")\n return temp\n\ndef get_timestamp1():\n return str(int(time.time()))\n\ndef md5a(md5str=None):\n h1 = hashlib.md5()\n h1.update(md5str.encode(encoding=\"utf-8\"))\n return h1.hexdigest()\n\ndef sign_alg(str_a,secuyrity):\n r_str_a = bytes((str_a).encode('utf-8'))\n r_secuyrity = bytes((secuyrity).encode('utf-8'))\n\n sign = (hmac.new(r_secuyrity,r_str_a,digestmod=hashlib.sha256)).hexdigest().upper()\n return 
sign\n\n\n# 随机生成3-10个中文字符\ndef nickname_random():\n nick_name = \"\"\n for i in range(random.randint(3,10)):\n val = chr(random.randint(0x4e00, 0x9fbf))\n nick_name += val\n return nick_name\n\nclass ZhaoyuLogin(TaskSet):\n\n host = \"http://qx-fat.qianshi188.com\"\n\n # 替换数据\n\n def on_start(self):\n\n print(\"-------------------- Test start --------------------\")\n\n def on_stop(self):\n print(\"-------------------- Test over --------------------\")\n\n @task(1)\n #@pysnooper.snoop(\"D:\\zhaoyulocust.log\")\n\n # 注册登录\n def login_user(self):\n\n self.timestamp = get_timestamp1()\n self.login_url = \"/gateway/customer/customer/registerLogin\"\n\n try:\n self.user_data = self.locust.queue_data.get() # 从队列中取出user赋值给user_data\n print(self.user_data)\n\n except queue.Empty:\n print(\"数据为空了......\")\n exit()\n\n # base_data\n self.channel_1 = \"android\"\n self.machinecode_1 = \"A000009C1C1FA3\"\n self.send_code_service = \"sms.send\"\n self.login_service = \"oauth.login\"\n self.token_1 = \"\"\n self.version_1 = \"v1.0\"\n self.key_1 = \"40a406d77d1b2ecc\"\n\n self.send_code_data = {\n \"Mobile\":self.user_data,\n 'SendType': 1\n }\n self.r_send_code_data = bs64_en(self.send_code_data)\n\n self.base_sign1 = \"channel=\" + self.channel_1 + \"&data=\" + self.r_send_code_data + \\\n \"&machinecode=\" + self.machinecode_1 + \"&service=\" + self.send_code_service + \\\n \"×tamp=\" + self.timestamp + \"&token=\" + self.token_1 + \"&version=\" + \\\n self.version_1 + \"&key=\" + self.key_1\n\n self.sign1 = md5a(self.base_sign1)\n\n # result_data\n self.result_send_code_data = {\n \"channel\": self.channel_1,\n \"data\": self.r_send_code_data,\n \"machinecode\": self.machinecode_1,\n \"service\": self.send_code_service,\n \"timestamp\": self.timestamp,\n \"token\": self.token_1,\n \"version\": self.version_1,\n \"sign\": self.sign1\n }\n\n response1 = HttpSession(base_url=self.host).request(method='post',url=self.login_url,json=self.result_send_code_data,\n headers=None,catch_response=False)\n print(\"Response content1:\", response1.text, self.result_send_code_data)\n\n self.login_data = {\n \"Mobile\":self.user_data,\n \"Code\": \"666666\",\n \"Longitude\": \"1223.232\",\n \"Latitude\": \"232.232\"\n }\n self.r_login_data = bs64_en(self.login_data)\n\n self.base_sign2 = \"channel=\" + self.channel_1 + \"&data=\" + self.r_login_data + \\\n \"&machinecode=\" + self.machinecode_1 + \"&service=\" + self.login_service + \\\n \"×tamp=\" + self.timestamp + \"&token=\" + self.token_1 + \"&version=\" + \\\n self.version_1 + \"&key=\" + self.key_1\n self.sign2 = md5a(self.base_sign2)\n\n self.result_login_data = {\n \"channel\": self.channel_1,\n \"data\": self.r_login_data,\n \"machinecode\": self.machinecode_1,\n \"service\": self.login_service,\n \"timestamp\": self.timestamp,\n \"token\": self.token_1,\n \"version\": self.version_1,\n \"sign\": self.sign2\n }\n\n response2 = HttpSession(base_url=self.host).request(method='post', url=self.login_url, json=self.result_login_data,\n headers=None, catch_response=False)\n\n #with self.client.post(\"http://210.22.78.174:809/api/gateway\",)\n print(\"Response content2:\",response2.text,self.result_login_data)\n #\n # #assert 200 == response1.status_code\n\n @task(1)\n # 完善DNA\n def edit_user_profile(self):\n self.edit_user_profile_url = \":80/api/gateway\"\n self.edit_user_profile_data = {\n \"NickName\" : nickname_random(),\n \"PhotoGraph\":\"http://imgcdn.zhaoyugf.com/FmWaTF5CbhwfxXQBhKgFvdMbGob6\",\n \"Gender\":random.randint(1,2)\n }\n self.channel_2 = 
\"android\"\n self.machinecode_2 = \"A000009C1C1FA3\"\n self.edit_user_profile_service = \"user.editbase\"\n self.token_2 = random.choice(self.user_token)\n self.version_2 = \"v1.0\"\n self.key_2 = \"40a406d77d1b2ecc\"\n self.r_edit_user_profile_data = bs64_en(self.edit_user_profile_data)\n self.timestamp_2 = get_timestamp1()\n\n self.base_sign3 = \"channel=\" + self.channel_2 + \"&data=\" + self.r_edit_user_profile_data + \"&machinecode=\" + self.machinecode_2\\\n + \"&service=\" + self.edit_user_profile_service + \"×tamp=\" + self.timestamp_2 + \\\n \"&token=\" + self.token_2 + \"&version=\" + self.version_2 + \"&key=\" + self.key_2\n\n self.sign3 = md5a(self.base_sign3)\n\n self.result_edit_user_profile_data = {\n \"channel\": self.channel_2,\n \"data\": self.r_edit_user_profile_data,\n \"machinecode\": self.machinecode_2,\n \"service\": self.edit_user_profile_service,\n \"timestamp\": self.timestamp_2,\n \"token\": self.token_2,\n \"version\": self.version_2,\n \"sign\": self.sign3\n }\n\n\n response3 = HttpSession(base_url=self.host).request(method='post', url=self.edit_user_profile_url,\n json=self.result_edit_user_profile_data,\n headers=None, catch_response=False)\n print(\"Response content3:\", response3.text, self.result_edit_user_profile_data)\n\nclass Test_run(HttpUser):\n\n #task_set = ZhaoyuLogin\n\n queue_data = queue.Queue() #实例化一个队列\n for i in range(200000):\n phone_num = str(14100000000 + i)\n print(phone_num)\n queue_data.put_nowait(phone_num) #把每一个用户账号存到队列中\n task_set = ZhaoyuLogin\n min_wait = 0\n max_wait = 0\n\n\nif __name__ == \"__main__\":\n # os模块执行系统命令,相当于在cmd切换到当前脚本目录,执行locust -f locust_login.py\n #os.system(\"locust -f locust_demo1.py --web-host=127.0.0.1\")\n os.system(\"locust -f locust_demo1.py --web-host=192.168.90.164\")\n\n #os.system(\"locust -f locust_send_group_gift.py --host=192.168.99.110\")\n\n", "sub_path": "locust_demo1.py", "file_name": "locust_demo1.py", "file_ext": "py", "file_size_in_byte": 7596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.dumps", "line_number": 22, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 23, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 34, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 42, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 42, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "locust.TaskSet", "line_number": 54, "usage_type": "name"}, {"api_name": "queue.Empty", "line_number": 80, "usage_type": "attribute"}, {"api_name": "locust.clients.HttpSession", "line_number": 118, "usage_type": "call"}, {"api_name": "locust.clients.HttpSession", "line_number": 147, "usage_type": "call"}, {"api_name": "locust.task", "line_number": 67, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 162, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 167, "usage_type": "call"}, {"api_name": "locust.clients.HttpSession", "line_number": 191, "usage_type": "call"}, {"api_name": "locust.task", "line_number": 155, "usage_type": "call"}, {"api_name": "locust.HttpUser", "line_number": 196, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 200, 
"usage_type": "call"}, {"api_name": "os.system", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "296905603", "text": "import asyncio\nimport logging\n\nfrom dynaconf import settings as s\nfrom wiring import Graph\nfrom wiring.scanning import scan_to_graph\n\nfrom . import db\nfrom .utils import parse_options, setup_logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef checker(graph):\n logger.debug('running checker')\n\n check = graph.get('temperature.checker')\n\n loop = asyncio.get_event_loop()\n asyncio.ensure_future(check.temperatures(), loop=loop)\n loop.run_forever()\n\n\ndef api(graph):\n logger.debug('running api')\n\n app = graph.get('app')\n app().run(debug=s.DEBUG, port=s.API_PORT, host=s.API_HOST)\n\n\ndef main():\n options = parse_options()\n\n setup_logging(options)\n\n try:\n graph = Graph()\n\n scan_to_graph(['climate'], graph)\n\n graph.validate()\n\n db.setup(graph.get('db.conn')(), options)\n\n if options.api:\n api(graph)\n\n elif options.temperature:\n checker(graph)\n\n except Exception as ex:\n logger.error(ex, exc_info=True)\n\n raise ex\n", "sub_path": "climate/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 19, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 20, "usage_type": "call"}, {"api_name": "dynaconf.settings.DEBUG", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dynaconf.settings", "line_number": 28, "usage_type": "name"}, {"api_name": "dynaconf.settings.API_PORT", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dynaconf.settings.API_HOST", "line_number": 28, "usage_type": "attribute"}, {"api_name": "utils.parse_options", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.setup_logging", "line_number": 34, "usage_type": "call"}, {"api_name": "wiring.Graph", "line_number": 37, "usage_type": "call"}, {"api_name": "wiring.scanning.scan_to_graph", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "398944802", "text": "import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport dash_bootstrap_components as dbc #0.11.0\nfrom app import app\nfrom app import server\n\n#app = dash.Dash(__name__, external_stylesheets=[dbc.themes.DARKLY])\n\nfrom apps import page1, page2\n#\n\napp.layout = html.Div([\n dbc.Row([\n dbc.Col([\n dcc.Location(id='url', refresh=False),\n\n dcc.Link('Investor EDA Page|', href='/apps/page1'), # first page\n dcc.Link('Lender Prediction Page', href='/apps/page2'), #second page\n ],width={'size': 6, 'offset': 1})]),\n dbc.Row([html.Div(id='page-content', children=[])])\n])\n\n\n\n@app.callback(Output('page-content', 'children'),\n [Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/apps/page1': #first page .py location\n # app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\n return page1.layout\n if pathname == '/apps/page2': #second page .py location\n # app = dash.Dash(__name__, external_stylesheets=[dbc.themes.DARKLY])\n return page2.layout\n else:\n return \" ______________________________Feeling lucky today? 
Click above to learn more about Lending Club:)_________________________________\"\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 1354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "app.app.layout", "line_number": 13, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 13, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 13, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 14, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 15, "usage_type": "call"}, {"api_name": "dash_core_components.Location", "line_number": 16, "usage_type": "call"}, {"api_name": "dash_core_components.Link", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_core_components.Link", "line_number": 19, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 21, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 21, "usage_type": "call"}, {"api_name": "apps.page1.layout", "line_number": 31, "usage_type": "attribute"}, {"api_name": "apps.page1", "line_number": 31, "usage_type": "name"}, {"api_name": "apps.page2.layout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "apps.page2", "line_number": 34, "usage_type": "name"}, {"api_name": "app.app.callback", "line_number": 26, "usage_type": "call"}, {"api_name": "app.app", "line_number": 26, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 26, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 27, "usage_type": "call"}, {"api_name": "app.app.run_server", "line_number": 40, "usage_type": "call"}, {"api_name": "app.app", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "410338251", "text": "#正则表达式匹配库\nimport re\nimport time\nfrom urllib.parse import urlencode\n#操作mongodb库\nimport pymongo\n#发出请求库\nimport requests\ntmp=1540023952177\n#将url请求对象转换为请求参数\ndata={\n 'callback': 'jsonp_queryMoreNums',\n 'provinceCode': 71,\n 'cityCode': 710,\n 'monthFeeLimit': 0,\n 'groupKey': 71243032,\n 'searchCategory': 3,\n 'net': '01',\n 'amounts': 200,\n 'codeTypeCode':'',\n 'searchValue':'',\n 'qryType': '02',\n 'goodsNet': 4,\n '_': tmp\n}\n\n# 1:先获取mongodb的数据库地址,2:再获取数据库名,3:再获取表名\ntable = pymongo.MongoClient('localhost')['phone']['number']\n\nwhile True:\n tmp=tmp+1\n headers = {\"user-agent\" : \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\"}\n url1 = 'https://m.10010.com/NumApp/NumberCenter/qryNum?'+urlencode(data)\n #请求ajax返回的json数据\n response1=requests.get(url1,headers=headers)\n #print(response1.text)\n info=re.findall('1(.*?),',response1.text)\n\n list=[]\n for i in range(len(info)):\n if table.find({'number': {'$regex': '1'+str(info[i])}}).count()==0:\n dict = {}\n dict['number']='1'+info[i]\n list.append(dict)\n print(list)\n\n if len(list)!=0:\n #插入单条数据到mongodb时数据的格式为字典形式:table.insert(dict)\n #插入多条数据到mongodb时数据的的格式应为列表包裹字典的形式:table.insert(list)\n table.insert_many(list)\n\n time.sleep(0.1)", "sub_path": "python开发/爬虫资料/无框架crawl/爬手机号码.py", "file_name": "爬手机号码.py", "file_ext": "py", "file_size_in_byte": 1568, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pymongo.MongoClient", "line_number": 28, "usage_type": 
"call"}, {"api_name": "urllib.parse.urlencode", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "397574939", "text": "from database import add_user, read_from_db\r\nfrom flask import Flask, render_template, request\r\nfrom superhero_api import SuperHeroAPI\r\n\r\napp = Flask(__name__)\r\ns = SuperHeroAPI()\r\n\r\n@app.route('/', methods=['POST', 'GET'])\r\ndef index():\r\n img_url = s.get_hero_image_url('Arsenal')\r\n _data = img_url\r\n return render_template('index.html', data=_data)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "superhero_api.SuperHeroAPI", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "145061763", "text": "from matplotlib.backends.backend_pdf import PdfPages\nfrom ..Hyperion.hyp_math import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplotdir = '/Users/sudhirraskutti/Desktop/Thesis/PaperI/Figures/'\ndatadir = '/u/raskutti/PhD/Hyperion/Tests/RadParGrav/'\nhstfile = 'id0/RadParGrav.hst'\noutfile = 'RadParGrav.out'\nhostname = 'raskutti@bellona.astro.princeton.edu'\n\nysize = 0.25 * 11.69\nxsize = 0.4 * 8.27\nfontsize = '10'\n\ndslist = [10.0, 100.0, 1000.0]\nnds = len(dslist)\nepsin = np.linspace(0,1,num=1000)\nepsout = []\nx = 1.0\nsigma = 1.5\npsi = 2000.0\n\nfor i in xrange(0,nds):\n sigma0 = dslist[i]\n epsof = eps_of_all(epsin, x, sigma, sigma0, psi)\n epsout.append(epsof)\n\ndslist = [0.5, 1.5, 2.5]\nnds = len(dslist)\nsepsout = []\nsigma0 = 100.0\n\nfor i in xrange(0,nds):\n sigma = dslist[i]\n epsof = eps_of_all(epsin, x, sigma, sigma0, psi)\n sepsout.append(epsof)\n\nplt.figure(figsize = [xsize,2*ysize])\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif', size=fontsize)\n\nplt.subplot(2,1,1)\nplt.subplots_adjust(left=0.2)\nplow, pmid, phigh, = plt.plot(epsin, epsout[0], 'k', epsin, epsout[1], 'r', epsin, epsout[2], 'b')\nplt.legend((plow, pmid, phigh), (r\"$\\displaystyle \\Sigma_{\\rm cl,0} = 10~M_\\odot~{\\rm pc^{-2}}$\",r\"$\\displaystyle 10^2$\",r\"$\\displaystyle 10^3$\"),prop={'size':8})\nplt.axis([0,1,0,1])\nplt.xticks([0,0.5,1],[' ',' ',' '])\nplt.yticks([0,0.5,1])\n\n#plt.xlabel(r\"$\\displaystyle t / t_{\\rm ff}$\")\nplt.ylabel(r\"$\\displaystyle \\varepsilon_{\\rm of}$\")\nplt.text(0.05*1,0.9*1,r\"$\\displaystyle(a)$\")\n\nplt.subplot(2,1,2)\nplow, pmid, phigh, = plt.plot(epsin, sepsout[0], 'k', epsin, sepsout[1], 'r', epsin, sepsout[2], 'b')\nplt.legend((plow, pmid, phigh), (r\"$\\displaystyle \\sigma_{{\\rm ln}\\Sigma} = 0.5$\",r\"$\\displaystyle 1.5$\",r\"$\\displaystyle 2.5$\"),prop={'size':8})\nplt.axis([0,1,0,1])\nplt.xticks([0,0.5,1])\nplt.yticks([0,0.5,1])\n\nplt.xlabel(r\"$\\displaystyle \\varepsilon$\")\nplt.ylabel(r\"$\\displaystyle \\varepsilon_{\\rm of}$\")\nplt.text(0.05*1,10**(-1+0.9*3),r\"$\\displaystyle(b)$\")\n\npp = PdfPages(plotdir + 'f27adj.pdf')\npp.savefig()\npp.close()\n\n\n", "sub_path": "RadiationSims/Python/Paper_I/f21adj.py", "file_name": "f21adj.py", "file_ext": "py", "file_size_in_byte": 2072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.linspace", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 66, "usage_type": "call"}]} +{"seq_id": 
"484335493", "text": "\nimport sys\nimport os\nimport re\nimport datetime\nimport csv\nimport utils\nimport transform\n\nfrom time import gmtime, strftime\nimport messages as Msg\nfrom utils import Map\n\nfrom scandir import scandir, walk\n\n# global variables\nepoch = datetime.datetime.utcfromtimestamp(0)\n\n# utility functions to evaluate python expressions defined in configuration strings\n# evaluates filter on the row's tags and fields values \ndef eval_filter(filter, timestamp, tags, fields):\n try:\n for k,v in tags.items():\n if v is not None:\n exec(k + \"=\\\"\" + str(v) + \"\\\"\")\n for k,v in fields.items():\n if v is not None:\n exec(k + \"=\" + str(v))\n return eval(filter)\n except Exception as e:\n Msg.err_msg(\"Error when evaluating the filter '%s': %s!\" % (filter, str(e))) \n return False \n# // eval_filter\n\n# transformation of data\ndef eval_transform(transform_exprs, timestamp, tags, fields):\n try:\n # declare variables and assign values to them\n for k,v in tags.items():\n if v is not None:\n exec(k + \"=\\\"\" + v + \"\\\"\")\n for k,v in fields.items():\n if v is not None:\n exec(k + \"=\" + str(v))\n\n # transform \n for expr in transform_exprs:\n try:\n exec(expr)\n except Exception as ex:\n pass\n Msg.info2_msg(\"Error when evaluating transformation '%s': %s\"%(expr,str(ex)))\n\n # get only variables that come from tags and fiedls, remove all local ones\n # the list in the below expression must contain all local variables in this function prior to this call!\n nf = { k:v for k,v in locals().items() if k not in [\"k\",\"v\",\"umc_id\",\"transform_exprs\",\"timestamp\",\"tags\",\"fields\",\"expr\",\"ex\"] } \n \n __t2 = {}; __f2 = {}\n for k,v in nf.items():\n if k in tags.keys():\n exec(\"__t2['%s']=%s\"%(k,k))\n elif k in fields.keys():\n exec(\"__f2['%s']=%s\"%(k,k))\n else:\n exec(\"value=%s\"%(k))\n if isinstance(value,int) or isinstance(value,float):\n exec(\"__f2['%s']=%s\"%(k,k))\n else:\n exec(\"__t2['%s']=%s\"%(k,k))\n # new tag or field that resulted from transformation\n # // for\n\n return __t2,__f2\n except Exception as e:\n Msg.err_msg(\"Error when evaluating transformations for %s: %s\"%(umc_id, str(e)))\n return tags,fields\n# // eval_transform\n\n# umc configuration object for umc configuration file metrics.conf\nclass UmcReader:\n def __init__(self, config, writer_id):\n self.config=config\n \n # read common reader's params\n base_key=\"common.umcpush.reader-params\"\n self.params=Map(\n max_batchsize_rows = self.config.value(base_key + \".max-batchsize-rows\", 50),\n max_batchsize_files = self.config.value(base_key + \".max-batchsize-files\", 300),\n log_file_group = self.config.value(base_key + \".log-file-group\", 1),\n common_tags = self.config.value(base_key + \".common-tags\").split(','),\n common_fields = self.config.value(base_key + \".common-fields\").split(','),\n default_timefield = self.config.value(base_key + \".default-timefield\", \"datetime\"),\n default_timeformat = self.config.value(base_key + \".default-timeformat\", \"%Y-%m-%d %H:%M:%S\"),\n tzoffset = utils.float_ex(self.config.value(base_key + \".tzoffset\", 0), 0)\n )\n \n # update any value that may be overriden in writer's specific parameters\n writers=config.value(\"common.umcpush.writers\")\n for writer in writers:\n if writer[\"writer-id\"]==writer_id:\n rparams=writer[\"reader-params\"]\n if rparams is not None:\n for k,v in rparams.items():\n k=k.replace(\"-\", \"_\")\n if self.params.get(k):\n self.params[k]=v\n else:\n Msg.warn_msg(\"The reader param %s is 
invalid in %s\"%(k,key))\n \n # *** reads and checks umc definition for a specific umc id\n def read_umcdef(self, umc_id, umcconf): \n # tags and fields cols of this umc definition\n tcols = [x.strip() for x in self.config.value_element(umcconf, \"reader.tags\").split(',') if x != '' ]\n fcols = [x.strip() for x in self.config.value_element(umcconf, \"reader.fields\").split(',') if x != '' ]\n \n # combine with common tags and fields cols\n tcols.extend(x for x in \n [y.strip() for y in self.params.common_tags ] \n if x != '' and x not in tcols and '!'+x not in tcols )\n fcols.extend(x for x in \n [y.strip() for y in self.params.common_fields ] \n if x != '' and x not in fcols and '!'+x not in tcols )\n \n # remove all commented out fields and tags\n tcols = [x for x in tcols if not(x.startswith('!')) ]\n fcols = [x for x in fcols if not(x.startswith('!')) ]\n \n # read and check time field and its format\n timeformat=self.config.value_element(umcconf, \"reader.timeformat\", self.params.default_timeformat)\n try:\n if timeformat not in ['_unix_', '_time_s_', '_time_ms_']:\n strftime(timeformat, gmtime())\n except Exception as e:\n raise Exception(\"The time format '%s' is invalid for umc '%s': %s!\" % (timeformat,umc_id,e)) \n \n timefield=self.config.value_element(umcconf, \"reader.timefield\", self.params.default_timefield) \n tzfield=self.config.value_element(umcconf, \"reader.tzfield\", None) \n \n filter=self.config.value_element(umcconf, \"reader.filter\", None)\n \n # transformation expressions\n transform=self.config.value_element(umcconf, \"reader.transform\", None)\n \n return Map(tcols=tcols,fcols=fcols,timeformat=timeformat,timefield=timefield,tzfield=tzfield,\n filter=filter,transform=transform)\n # // read_umcdef\n\n # unix time\n def unix_time_millis(self, dt):\n return int((dt - epoch).total_seconds() * 1000)\n\n # retrieves the first batch of log files sorted by modified time\n def get_batch_logs(self, logDir, umc_instanceids, files_in_buffer=[]):\n pattern = re.compile(\".+_[0-9]+.*\\.log.{log_file_group}$\".format(log_file_group=self.params.log_file_group))\n search_re=logDir + \"/[a-zA-Z0-9\\._\\-]+/([a-zA-Z0-9\\-\\._]+)\" # + \"|\".join(GlobalContext.config.umc_instanceids(False)) + \")$\";\n \n batch=[]; cnt=0\n for dirname, dirnames, filenames in walk(logDir):\n #Msg.info1_msg(\"walk: %s, filenames=%d\"%(dirname,len(filenames)))\n m=re.match(search_re, dirname)\n if m and m.group(1) in umc_instanceids:\n for filename in filenames:\n fullfname=os.path.join(dirname, filename)\n if fullfname not in files_in_buffer and pattern.match(filename):\n cnt=cnt+1\n if cnt <= self.params.max_batchsize_files: \n batch.append(fullfname)\n if cnt > self.params.max_batchsize_files:\n break\n return sorted(batch, key=lambda fn: os.stat(fn).st_mtime, reverse=True)\n # // get_batch_logs\n \n # read data points from a single log file\n def read_datapoints(self, logfilename, umcdef, create_writeitem_func): \n datapoints = []; notags=False; nofields=False; \n tzoffset = self.params.tzoffset \n \n if umcdef.enabled: \n # read datapoints\n with open(logfilename, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n for row in reader:\n # remove None keys\n row = { k:v for k, v in row.items() if k is not None }\n \n # timestamp\n try:\n if not(umcdef.reader.timefield in row):\n raise ValueError(\"Cannot find time field '\" + umcdef.reader.timefield + \"' in data row!\") \n if umcdef.reader.timeformat == \"_unix_\" or umcdef.reader.timeformat == \"_time_s_\":\n timestamp = 
long(row[umcdef.reader.timefield]) * 1000000000 \n elif umcdef.reader.timeformat == \"_time_ms_\":\n timestamp = long(row[umcdef.reader.timefield]) * 1000000 \n else:\n if umcdef.reader.tzfield is not None and umcdef.reader.tzfield in row:\n tzoffset = utils.float_ex(row[umcdef.reader.tzfield], self.params.tzoffset) \n timestamp = (self.unix_time_millis(datetime.datetime.strptime(row[umcdef.reader.timefield],umcdef.reader.timeformat)) - int(tzoffset*60*60*1000)) * 1000000\n except Exception as e:\n # output error and skip this row\n Msg.err_msg(\"Cannot read or convert time to timestamp for %s: %s\"%(umcdef.umcid,str(e)))\n continue \n \n # create tags and fields\n tags = { k:str(v) for k, v in row.items() if k in umcdef.reader.tcols }\n fields = { k:utils.float_ex(v) for k, v in row.items() if k in umcdef.reader.fcols } \n notags = (len(tags) == 0)\n \n # only add this row if there is at least one field with some value\n if len([ v for k,v in fields.items() if v is not None ])>0:\n # evaluate transformations\n if umcdef.reader.transform is not None:\n tags,fields = eval_transform(umcdef.reader.transform,timestamp,tags,fields)\n\n # only add this row if filter holds on this row or there is no filter\n if umcdef.reader.filter is None or eval_filter(umcdef.reader.filter, timestamp,tags, fields):\n try:\n records=create_writeitem_func(umcdef, timestamp, fields, tags)\n if records is not None and isinstance(records, list):\n datapoints+=records\n except Exception as e:\n Msg.err_msg(\"Error occured while creating data points item: %s\"%str(e))\n # // if write data\n \n # // end reading rows\n # // end open file\n \n # check for no tags\n if notags and len(datapoints) > 0:\n Msg.warn_msg(\"The definition of %s contains no tags presented in the log file %s!\"%(umcdef.umcid,os.path.basename(logfilename)))\n \n return datapoints\n \n", "sub_path": "bin/libs/umcreader.py", "file_name": "umcreader.py", "file_ext": "py", "file_size_in_byte": 11231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "messages.err_msg", "line_number": 31, "usage_type": "call"}, {"api_name": "messages.info2_msg", "line_number": 52, "usage_type": "call"}, {"api_name": "messages.err_msg", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.Map", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.float_ex", "line_number": 94, "usage_type": "call"}, {"api_name": "messages.warn_msg", "line_number": 108, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 132, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 132, "usage_type": "call"}, {"api_name": "utils.Map", "line_number": 144, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 154, "usage_type": "call"}, {"api_name": "scandir.walk", "line_number": 158, "usage_type": "call"}, {"api_name": "re.match", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 170, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 181, "usage_type": "call"}, {"api_name": "utils.float_ex", "line_number": 196, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 197, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 197, "usage_type": "attribute"}, {"api_name": "messages.err_msg", "line_number": 200, "usage_type": "call"}, {"api_name": "utils.float_ex", "line_number": 205, "usage_type": "call"}, {"api_name": "messages.err_msg", "line_number": 221, "usage_type": "call"}, {"api_name": "messages.warn_msg", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}]} +{"seq_id": "431341264", "text": "import sys\nimport datetime\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtNetwork import *\n\nfrom macro import BlockRandomizedList\n\nfrom BCI import BCI\n\nclass Trainer(QWidget):\n def __init__(self):\n super(Trainer, self).__init__()\n \n #init UI\n self.initUI()\n \n self.is_connected = False\n self.server = QTcpSocket(self)\n self.server.connectToHost('127.0.0.1', 45451)\n \n self.server.readyRead.connect(self.readData)\n self.server.connected.connect(self.connected)\n self.server.disconnected.connect(self.disconnected)\n \n def initUI(self):\n \n self.setGeometry(0,0,640,480)\n self.setWindowTitle('Trainer') \n self.setStyleSheet(\"background-color:black;\"); \n \n hbox = QHBoxLayout()\n hbox.setAlignment(Qt.AlignLeft)\n \n self.btn = QPushButton('Start',self)\n self.btn.clicked.connect(self.btnClicked) \n \n self.connect_state = QLabel('Disconnected',self)\n self.connect_state.setStyleSheet(\"QLabel { color: red; font-size: 14pt; font-family: 'Times New Roman';}\");\n \n hbox.addWidget(self.btn)\n hbox.addWidget(self.connect_state) \n \n hbox2 = QHBoxLayout()\n hbox2.setAlignment(Qt.AlignCenter)\n \n self.inst = QLabel('Instruction',self)\n self.inst.setStyleSheet(\"QLabel { color: white; font-size: 55pt; font-family: 'Times New Roman';}\");\n self.inst.setSizePolicy(QSizePolicy(QSizePolicy.Maximum,QSizePolicy.Expanding))\n hbox2.addWidget(self.inst)\n \n vbox = QVBoxLayout()\n vbox.setAlignment(Qt.AlignTop) \n \n vbox.addLayout(hbox)\n vbox.addLayout(hbox2) \n \n self.setLayout(vbox)\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.show()\n \n def requestData(self, n_sample):\n self.server.writeData('raw,'+str(n_sample)+'\\n');\n \n def readData(self): \n self.training.log.write(self.server.readLine())\n \n def connected(self):\n self.connect_state.setText('Connected')\n self.connect_state.setStyleSheet(\"QLabel { color: green; font-size: 14pt; font-family: 'Times New Roman';}\");\n \n self.is_connected = True\n \n def disconnected(self):\n self.connect_state.setText('Disconnected')\n self.connect_state.setStyleSheet(\"QLabel { color: red; font-size: 14pt; font-family: 'Times New Roman';}\");\n \n self.is_connected = False\n \n def btnClicked(self): \n self.training = self.Training(self)\n self.timer_id=self.startTimer(50)\n \n def timerEvent(self, event):\n if not self.training.run():\n self.killTimer(self.timer_id)\n #training BCI model\n bci = BCI()\n bci.train(self.training.name)\n bci.saveModel(\"bci.m\")\n bci.plotPSD()\n \n #nested class to control the training section \n class Training():\n def __init__(self, _parent):\n self.parent=_parent\n \n \n \n self.len_trial = 5\n self.len_interval = 1\n \n self.n_sample = self.len_trial*512\n \n self.n_class = 2\n self.class_name = ('Neutral', 'Concentration')\n \n self.n_trial = 5 *self.n_class\n self.cur_trial = 0\n \n self.state = 0\n self.is_init = False\n \n self.seq=BlockRandomizedList(self.n_trial, self.n_class)\n 
\n self.timer = QElapsedTimer()\n \n self.name=datetime.datetime.now().strftime('%Y%b%d_%H%M%S.log')\n self.log = open(self.name,'w')\n \n def run(self):\n if self.state == 0: \n #interval \n if not self.is_init:\n self.is_init = True\n self.parent.inst.setText('+')\n self.timer.start()\n elif (self.timer.elapsed()/1000)>=self.len_interval:\n self.is_init = False\n self.state = 1 \n else:\n pass\n elif self.state == 1:\n #during trial\n if not self.is_init:\n self.is_init = True\n #about data \n self.log.write(str(self.seq[self.cur_trial])+'\\n') \n \n self.timer.start()\n self.parent.inst.setText(self.class_name[self.seq[self.cur_trial]])\n elif (self.timer.elapsed()/1000)>=self.len_trial:\n self.cur_trial+=1\n self.is_init = False\n \n #about data \n self.parent.requestData(self.n_sample)\n \n if self.cur_trial==self.n_trial:\n self.state=2\n else:\n self.state=0\n else:\n pass\n else:#self.state == 2\n #end\n if not self.is_init:\n self.is_init = True\n self.log.close()\n self.timer.start()\n self.parent.inst.setText(\"End\")\n elif (self.timer.elapsed()/1000)>=self.len_interval:\n del self.timer \n self.parent.inst.setText(\"\")\n return False\n else:\n pass\n \n return True\n\nif __name__=='__main__':\n app = QApplication(sys.argv)\n \n trainer = Trainer()\n \n sys.exit(app.exec_())\n", "sub_path": "bci/Trainer.py", "file_name": "Trainer.py", "file_ext": "py", "file_size_in_byte": 5960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "BCI.BCI", "line_number": 89, "usage_type": "call"}, {"api_name": "macro.BlockRandomizedList", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 119, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 173, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "372191314", "text": "#! 
/usr/bin/python3\nimport itertools\nimport sys\nimport re\nimport yaml\n\nfrom collections import defaultdict, OrderedDict\n\ndef cyclic_permutation(iterable):\n    iterable = tuple(iterable)\n    n = len(iterable)\n    return (tuple(iterable[i - j] for i in range(n)) for j in range(n))\n\nclass PoussinImportError(Exception):\n    pass\n    \nclass PoussinRelAlreadyCreated(Exception):\n    pass\n\nclass PoussinIncompatibleRelError(Exception):\n    pass\n    \nclass IdDict(dict):\n    def __missing__(self, key):\n        return key\n\nTRIVIAL_CONTEXT = IdDict()\n\ncamel_split = lambda s: re.sub(\"([a-z])([A-Z])\",\"\\\g<1> \\\g<2>\",s).lower()\n\ndef upcase_first_letter(s):\n    return s[0].upper() + s[1:]\n    \ndef duplicates(seq):\n    seen = set()\n    seen_add = seen.add\n    # adds all elements it doesn't know yet to seen and all others to seen_twice\n    seen_twice = set( x for x in seq if x in seen or seen_add(x) )\n    # return the set of elements that were seen more than once\n    return seen_twice\n\ndef transform_poussin(element):\n    def typed_arg_to_dict(arg):\n        name, type = arg.split('<')\n        return {\"name\": name, \"type\": type}\n    \n    if element[\"type\"] == \"object\":\n        \n        for constructor in element[\"constructors\"]:\n            args = [typed_arg_to_dict(arg) for arg in constructor[\"signature\"].split()]\n            constructor[\"signature\"] = args\n            try:\n                constructor[\"relations\"] = [rel.split() for rel in constructor[\"relations\"]]\n            except KeyError: pass\n    elif element[\"type\"] == \"relation\":\n        try:\n            element[\"incompatible_relations\"] = [hypo.split() for hypo in element[\"incompatible_relations\"]]\n        except KeyError: pass\n        \n    elif element[\"type\"] == \"property\":\n        element[\"hypothesis\"] = [hypo.split() for hypo in element[\"hypothesis\"]]\n        element[\"conclusions\"] = [hypo.split() for hypo in element[\"conclusions\"]]\n    \n    if element[\"type\"] in (\"relation\", \"property\"):\n        element[\"signature\"] = [typed_arg_to_dict(arg) for arg in element[\"signature\"].split()]\n    return element\n\nget_args_type = lambda signature: [ arg[\"type\"] for arg in signature]\nget_args_name = lambda signature: [ arg[\"name\"] for arg in signature]\n\nclass PoussinInterprete:\n    SPECIAL_COMMANDS = (\"hypo\", \"aide\")\n    def initial_state():\n        return {\n            \"relations\": defaultdict(set),\n            \"objects\": OrderedDict(),\n        }\n    def __init__(self, instance=None, state=None, stream_out=sys.stdout, locked=False):\n        self.types = instance.types if instance else OrderedDict([(\"NouveauNom\",{\"type\":\"special\", \"gender\": \"male\"},)])\n        self.stream_out = stream_out\n        self.state = state or self.__class__.initial_state()\n        self.locked = locked\n        \n    @property\n    def state(self):\n        return {\n            \"relations\": self.relations,\n            \"objects\": self.objects,\n        }\n    \n    @state.setter\n    def state(self, state):\n        for key, value in state.items():\n            setattr(self, key, value)\n    \n    \n    def load(self, data):\n        data = list(data)\n        all_types = self._load_types(data)\n#        self._type_checking(data, all_types)\n        self._add_elements(data)\n\n    def load_yaml_file(self, file_name):\n        with open(file_name) as f:\n            self.load(map(transform_poussin, yaml.load_all(f)))\n    \n    def _load_types(self, data):\n        new_types = [element[\"name\"] \n                     for element in data\n                     if element[\"type\"] == \"object\"]\n        dup_types = duplicates(itertools.chain(new_types, self.types))\n        if dup_types:\n            raise PoussinImportError(\"These type names appear several times: %s.\"% ', '.join(dup_types))\n        return set(itertools.chain(new_types, self.types))\n\n    \n    def _add_elements(self, data):\n        for element in data:\n            name = element.pop(\"name\")\n            
self.types[name] = element\n \n def get_type(self, name):\n return self.objects.get(name, \"NouveauNom\")\n\n def get_display(self, name, type=None, is_a=False):\n if type is None:\n type = self.get_type(name)\n \n ty = camel_split(type)\n if not is_a:\n art = \"le\" if self.types[type][\"gender\"] == \"male\" else \"la\"\n return \"{art} {ty} {name}\".format(**locals())\n else:\n art = \"un\" if self.types[type][\"gender\"] == \"male\" else \"une\"\n return \"{name} est {art} {ty}\".format(**locals())\n\n def rel_is_true(self, rel, args):\n args = tuple(args)\n if self.types[rel].get(\"symmetric\"):\n return any(perm in self.relations[rel]\n for perm in itertools.permutations(args))\n reverse = self.types[rel].get(\"reverse\")\n if self.types[rel].get(\"cyclic\"):\n if any(perm in self.relations[rel]\n for perm in cyclic_permutation(args)):\n return True\n \n if reverse and any(perm in self.relations[rel]\n for perm in cyclic_permutation(args[-1::-1])):\n return True\n if reverse:\n return args in self.relations[rel] or args[-1::-1] in self.relations[rel]\n else:\n return args in self.relations[rel]\n\n def get_local_context(self, hypo, context=TRIVIAL_CONTEXT):\n rel = hypo[0]\n args_hypo = hypo[1:]\n args_rel = get_args_name(self.types[rel][\"signature\"])\n \n return OrderedDict((arg_rel, context[arg_hypo])\n for arg_rel, arg_hypo in zip(args_rel, args_hypo))\n \n def get_rel_display(self, rel, context=TRIVIAL_CONTEXT, checked=True):\n display = \"display\" if checked else \"displayfalse\"\n \n return self.types[rel][display].format(**context)\n\n def check_hypo(self, hypo, context):\n rel = hypo[0]\n args_name = hypo[1:]\n return self.rel_is_true(rel, (context[arg] for arg in args_name))\n\n def say(self, message):\n self.stream_out.write(message + \"\\n\")\n \n def create_rel(self, rel, context):\n new_elt = tuple(context.values())\n if self.rel_is_true(rel, new_elt):\n raise PoussinRelAlreadyCreated\n \n errors = []\n rawconstraint = self.types[rel].get(\"rawconstraint\")\n replace_quotes = lambda s: s.replace(\"'\", r\"\\'\").replace('\"',r'\\\"')\n if rawconstraint and not eval(rawconstraint[\"test\"].format(**{k: replace_quotes(v) for k,v in context.items()})):\n errors.append(rawconstraint[\"errormsg\"])\n for incomp in self.types[rel].get(\"incompatible_relations\", []):\n local_context = self.get_local_context(incomp, context)\n if self.rel_is_true(incomp[0], local_context.values()):\n errors.append(self.get_rel_display(incomp[0], local_context))\n \n if errors:\n raise PoussinIncompatibleRelError(errors)\n\n self.relations[rel].add(new_elt)\n \n def help(self, args):\n if len(args) == 1 and (args[0] in self.types or args[0] in self.SPECIAL_COMMANDS):\n arg = args[0]\n \n if arg == 'hypo':\n self.say(\"Affiche toutes les connaissances de Poussin.\")\n elif arg == 'aide':\n self.say(\"Ben... 
ça affiche l'aide !\")\n            \n            \n        elif self.types[arg][\"type\"] == 'object':\n            constructors = self.types[arg][\"constructors\"]\n            article = \"un\" if self.types[arg][\"gender\"] == \"male\" else \"une\"\n            an_object = camel_split(arg)\n            if len(constructors) > 1 :\n                self.say(\"Voici les différentes façons de construire %s %s :\"\n                         % (article, an_object))\n            else:\n                self.say(\"Voici la façon de construire %s %s :\" % (article, an_object))\n            \n            for constructor in constructors:\n                args_name = get_args_name(constructor[\"signature\"])\n                args_type = get_args_type(constructor[\"signature\"])\n                args_is_a = (self.get_display(n,t, True) for n,t in zip(args_name, args_type))\n                self.say(\"\\t* `%s %s` où %s.\"%(arg,\" \".join(args_name), \", \".join(args_is_a)))\n            \n        elif self.types[arg][\"type\"] == 'relation':\n            signature = self.types[arg][\"signature\"]\n            args_name = get_args_name(signature)\n            args_type = get_args_type(signature)\n            args_is_a = (self.get_display(n,t, True) for n,t in zip(args_name, args_type))\n            self.say(\"L'instruction `%s %s` où :\\n\\t * %s,\\nindique à Poussin que %s.\"%\n                     (arg, \" \".join(args_name), \",\\n\\t * \".join(args_is_a), self.get_rel_display(arg)))\n        elif self.types[arg][\"type\"] == 'property':\n            signature = self.types[arg][\"signature\"]\n            args_name = get_args_name(signature)\n            args_type = get_args_type(signature)\n            args_is_a = (self.get_display(n,t, True) for n,t in zip(args_name, args_type))\n            self.say(\"En considérant que :\\n\\t * %s.\"%\n                     \",\\n\\t * \".join(args_is_a))\n            \n            hypothesis = (self.get_rel_display(h[0], self.get_local_context(h)) for h in self.types[arg][\"hypothesis\"])\n            self.say(\"Si :\\n\\t * %s.\" % \",\\n\\t * \".join(hypothesis))\n            \n            conclusions = (self.get_rel_display(h[0], self.get_local_context(h)) for h in self.types[arg][\"conclusions\"])\n            self.say(\"Alors :\\n\\t * %s.\" % \",\\n\\t * \".join(conclusions))\n            \n            self.say(\"L'instruction `%s %s` ajoute les conclusions aux \"\n                     \"connaissances de Poussin, à condition que les hypothèses \"\n                     \"soient bien vérifiées\" % (arg,\" \".join(args_name)))\n        else:\n            self.say(\"Voici les commandes disponibles :\")\n            self.say(\"\\t* hypo\")\n            for name, type in self.types.items():\n                if type[\"type\"] == 'object':\n                    for constructor in type[\"constructors\"]:\n                        self.say(\"\\t* %s %s\"%(name,\" \".join(get_args_type(constructor[\"signature\"]))))\n                elif type[\"type\"] in ('relation', 'property'):\n                    self.say(\"\\t* %s %s\"%(name,\" \".join(get_args_type(type[\"signature\"]))))\n            \n            self.say(\"Tapez \\\"aide [NomDeLaCommande]\\\" pour plus de détails.\")\n    \n    def ask(self, question):\n        command = question.strip().split()\n        fun = command[0]\n        args = command[1:]\n        \n        if self.types.get(fun, {}).get('type', \"\") not in ('object', 'relation'):\n            raise ValueError(\"%s is not a valid question\"%fun)\n        \n        if self.types[fun][\"type\"] == \"object\":\n            if len(args) != 1:\n                raise ValueError(\"A question about an object cannot concern more than one name\")\n            \n            return self.objects.get(args[0], \"\") == fun\n        \n        if self.types[fun][\"type\"] == \"relation\":\n            return self.rel_is_true(fun, args)\n        \n    def execute(self, command):\n        command =command.strip()\n        if command[-1] == \"?\":\n            question = 1\n            command = command[:-1]\n            try:\n                self.say(\"Oui\" if self.ask(command) else \"Non\")\n            except ValueError:\n                self.say(\"Erreur : la question est mal formulée.\")\n            return\n\n        command = command.split()\n        fun = command[0]\n        args = command[1:]\n        \n        if fun not in self.SPECIAL_COMMANDS and fun not in self.types:\n            self.say(\"Erreur : \" + fun + \" 
n'est pas une commande connue\")\n            return\n        \n        if fun not in self.SPECIAL_COMMANDS and any(arg in self.types for arg in args):\n            self.say(\"Erreur : vous avez utilisé un mot clef comme argument. Voici la liste des mots réservés:\")\n            for type in self.types:\n                self.say(\"\\t* %s\"%type)\n            return\n\n        if fun not in self.SPECIAL_COMMANDS and self.locked and self.types[fun][\"type\"] != \"property\":\n            self.say(\"Poussin est verrouillé, vous ne pouvez pas utiliser la commande %s.\"%fun)\n            return\n\n        args_type = [self.get_type(name) for name in args]\n        \n        if fun == 'aide':\n            self.help(args)\n        \n        elif fun == 'hypo':\n            if self.objects or any(self.relations.values()):\n                self.say(\"Voici ce que nous savons :\")\n                for name in self.objects:\n                    self.say(\"\\t* %s.\" % self.get_display(name, is_a=True))\n                for name, rels in self.relations.items():\n                    args_name = get_args_name(self.types[name][\"signature\"])\n                    for args_rel in rels:\n                        context = dict(zip(args_name, args_rel))\n                        self.say(\"\\t* %s.\" % self.get_rel_display(name, context))\n            \n            else:\n                self.say(\"Nous ne savons rien pour l'instant...\")\n        \n        elif self.types[fun][\"type\"] == \"object\":\n            constructors = self.types[fun][\"constructors\"]\n            try:\n                # search for the first constructor whose signature matches the requested one\n                constructor = next(constructor\n                                   for constructor in constructors\n                                   if get_args_type(constructor[\"signature\"]) == args_type)\n                context = dict(zip(get_args_name(constructor[\"signature\"]), args))\n                \n                rawconstraint = constructor.get(\"rawconstraint\")\n                if rawconstraint and not eval(rawconstraint[\"test\"].format(**context)):\n                    self.say(\"Erreur : \" + rawconstraint[\"errormsg\"])\n                    return\n                \n                new_name = constructor[\"display\"].format(**context)\n                if new_name in self.objects:\n                    self.say(\"Erreur : impossible de créer %s, ce nom est déjà pris \"\n                             \"(cela signifie souvent que l'objet est déjà créé)\"\n                             %self.get_display(new_name, fun))\n                else:\n                    self.objects[new_name] = fun\n                    self.say(upcase_first_letter(\"%s a été créé\"%self.get_display(new_name)))\n                    rels = constructor.get(\"relations\")\n                    if rels:\n                        context['__self__'] = new_name\n                        self.say(\"Nous savons immédiatement que :\")\n                        for rel in rels:\n                            local_context = self.get_local_context(rel, context)\n                            self.create_rel(rel[0], local_context)\n                            self.say(\"\\t* %s\" % self.get_rel_display(rel[0], local_context))\n                \n                \n            except StopIteration: # no constructor signature matched the requested one\n                self.say(\"La commande %s n'a pas été utilisée correctement.\"% fun)\n                if len(args):\n                    self.say(\"Voici l'ensemble des arguments donnés:\")\n                    for arg in args:\n                        self.say(\"\\t* %s\"%self.get_display(arg, is_a=True))\n                self.help([fun])\n        \n        elif self.types[fun][\"type\"] == \"relation\":\n            signature_type = get_args_type(self.types[fun][\"signature\"])\n            if args_type != signature_type:\n                self.say(\"La commande %s n'a pas été utilisée correctement.\"% fun)\n                if len(args):\n                    self.say(\"Voici l'ensemble des arguments donnés:\")\n                    for arg in args:\n                        self.say(\"\\t* %s\"%self.get_display(arg, is_a=True))\n                \n                self.help([fun])\n            else:\n                \n                context = OrderedDict(zip(get_args_name(self.types[fun][\"signature\"]), args))\n                try:\n                    self.create_rel(fun, context)\n                    self.say(\"Désormais, %s.\" % self.get_rel_display(fun, context))\n                except PoussinRelAlreadyCreated:\n                    self.say(\"Nous savons déjà que %s.\" % self.get_rel_display(fun, context))\n                except PoussinIncompatibleRelError as e:\n                    if e.args:\n                        self.say(\"Erreur : il est impossible de définir cette relation car :\")\n                        for error in e.args[0]:\n                            
self.say(\"\\t* %s\"%error)\n else:\n self.say(\"Erreur : il est impossible de définir cette relation.\")\n\n elif self.types[fun][\"type\"] == \"property\":\n signature_type = get_args_type(self.types[fun][\"signature\"])\n if args_type != signature_type:\n self.say(\"La commande %s n'a pas été utilisée correctement.\"% fun)\n if len(args):\n self.say(\"Voici l'ensemble des arguments donnés:\")\n for arg in args:\n self.say(\"\\t* %s\"%self.get_display(arg, is_a=True))\n \n self.help([fun])\n else:\n context = dict(zip(get_args_name(self.types[fun][\"signature\"]), args))\n check = True\n self.say(\"Vérifions les hypothèses:\")\n for hypo in self.types[fun][\"hypothesis\"]:\n local_check = self.check_hypo(hypo, context)\n local_context = self.get_local_context(hypo, context)\n self.say(\"\\t* %s.\" % self.get_rel_display(hypo[0], local_context, local_check))\n if not local_check:\n check = False\n \n if check:\n self.say(\"Par conséquent :\")\n for conc in self.types[fun][\"conclusions\"]:\n rel = conc[0]\n local_context = self.get_local_context(conc, context)\n try:\n self.create_rel(rel, local_context)\n self.say(\"\\t* %s.\" % self.get_rel_display(rel, local_context))\n except PoussinRelAlreadyCreated:\n self.say(\"\\t* %s (on le savait déjà).\" % \n self.get_rel_display(rel, local_context))\n except PoussinIncompatibleRelError as e:\n self.say(\"Erreur: la propriété implique que %s, mais \"\n \"cela est impossible car:\"%self.get_rel_display(rel, local_context))\n for error in e.args[0]:\n self.say(\"\\t* %s\"%error)\n \n self.say(\"!! Le système est dans un état incohérent, il est \" \n \"conseillé de le redémarer !!\")\n break\n else:\n self.say(\"La propriété ne peut pas être appliquée.\")\n \n \nif __name__ == '__main__':\n print(\"Bienvenue dans l'assistant de preuves Poussin.\")\n interp = PoussinInterprete()\n interp.load_yaml_file(\"logic.yaml\")\n\n entry = input(\">>> Et maintenant ? \")\n while entry not in ('q', 'quit'):\n interp.execute(entry)\n entry = input(\">>> Et maintenant ? 
\")\n \n \n \n \n \n", "sub_path": "poussin.py", "file_name": "poussin.py", "file_ext": "py", "file_size_in_byte": 19845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "re.sub", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 75, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 78, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 79, "usage_type": "call"}, {"api_name": "yaml.load_all", "line_number": 105, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 111, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 114, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 141, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 161, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 376, "usage_type": "call"}]} +{"seq_id": "598129157", "text": "from collections import deque\n\n\nclass Codec:\n\n def serialize(self, root):\n if not root: return None\n queue, res = deque([root]), str(root.val)\n while len(queue) > 0:\n node = queue.popleft()\n if node.left:\n queue.append(node.left)\n res += \".\" + str(node.left.val) #Remember to use \".\" as split in case that the node val is 34,12,....\n else:\n res += \".*\"\n if node.right:\n queue.append(node.right)\n res += \".\" + str(node.right.val)\n else:\n res += \".*\"\n return res\n\n def deserialize(self, data):\n if not data: return None\n data = data.split(\".\")\n root = TreeNode(int(data[0]))\n queue, i = deque([root]), 1\n while i < len(data):\n node = queue.popleft()\n if data[i] == \"*\":\n node.left = None\n else:\n node.left = TreeNode(int(data[i]))\n queue.append(node.left)\n if i + 1 >= len(data): break\n if data[i + 1] == \"*\":\n node.right = None\n else:\n node.right = TreeNode(int(data[i + 1]))\n queue.append(node.right)\n i += 2\n return root\n", "sub_path": "medium/mediumCode/BinarySearchTree/***SerializeAndDeserializeBST_449.py", "file_name": "***SerializeAndDeserializeBST_449.py", "file_ext": "py", "file_size_in_byte": 1310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.deque", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "249191734", "text": "import os\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport re\nimport sys\n\nINPUT_FOLDER = sys.argv[1]\nOUTPUT_FOLDER = sys.argv[2]\n\nif len(sys.argv) < 3:\n print('Usage: python preprocess.py ')\n sys.exit(-1)\n\ndef preprocess_log(filetype, log_files=['dataset/mcBSC/Antti_Palojarvi/Examples/Training1/Switch_logs/no_passive_missing/EXT2_BMT.log']):\n\tcmd_log = {}\t\t# dict of filetype vs (dict of cmd_name vs log content)\n\tline_num = 1\n\tfor log_filename in log_files:\n\t\tlog_file = open(log_filename, 'r', encoding = \"ISO-8859-1\")\n\t\tcontent = log_file.readlines()\n\t\tlog_file.close()\n\n\t\tcollected_log = []\n\t\tlog_lines = []\n\t\tprev_cmd = None\n\t\tfor line in content:\n\t\t\tif \";\" in line:\n\t\t\t\twords = word_tokenize(line)\n\t\t\t\tfor word in words:\n\t\t\t\t\tif word[0] == 'Z':\n\t\t\t\t\t\tw = word.split(':')[0]\n\t\t\t\t\t\tif len(w) > 5:\n\t\t\t\t\t\t\tw = w[:4]\n\n\t\t\t\t\t\tif prev_cmd is not 
None:\n\t\t\t\t\t\t\tif prev_cmd not in cmd_log:\n\t\t\t\t\t\t\t\tcmd_log[prev_cmd] = [collected_log]\t\t#this is a list\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcmd_log[prev_cmd].append(collected_log)\n\t\t\t\t\t\tprev_cmd = w\n\t\t\t\t\t\tbreak\n\t\t\t\tcollected_log = [str(line_num)+'\\t'+line]\n\t\t\telse:\n\t\t\t\tcollected_log.append(str(line_num)+'\\t'+line)\n\t\t\tline_num += 1\n\t\tif prev_cmd is not None:\n\t\t\tif prev_cmd not in cmd_log:\n\t\t\t\tcmd_log[prev_cmd] = [collected_log]\t\t#this is a list\n\t\t\telse:\n\t\t\t\tcmd_log[prev_cmd].append(collected_log)\n\n\treturn cmd_log\n\n\ndef zero_digits(s):\n # return s\n return re.sub('\\d', '0', s)\n\n\nfiletypes = {}\t\t# file types vs names of file in that type\ndef walk_directory(rootdir=INPUT_FOLDER):\n\t# filetypes = []\n\tEXCLUDES = ['.zip', '.ZIP', '.bin','.BIN', '.rar', '.MAP', '.BAK', '.BBX', '.tgz', '.DAT', '.SHL', 'TEST', '0.HW', '.xlsx']\n\tfor subdir, dirs, files in os.walk(rootdir):\n\t\tfor file in files:\n\n\t\t\tfiletype = zero_digits(file)\n\t\t\tif file[-4:] not in EXCLUDES and file[-2:] != '.Z' and filetype[-4:] != 'S000' and file != 'info.txt':\n\t\t\t\tfilename = os.path.join(subdir, file)\n\t\t\t\t# print(os.path.join(subdir, file))\n\t\t\t\t# preprocess_log(cmd_log, filetype, filename)\n\t\t\t\t# print(filetype)\n\t\t\t\tif filetype not in filetypes:\n\t\t\t\t\tfiletypes[filetype] = [filename] \n\t\t\t\telse:\n\t\t\t\t\tfiletypes[filetype].append(filename)\n\t\t\t\t# filetypes.append(filetype)\n\n\t# print(filetypes['BCM00000.log'])\n\t# print(filetypes.keys())\n\treturn filetypes\n\nfiletypes = walk_directory()\n# sys.exit(-1)\n# print(filetypes.keys())\n# print(filetypes['SUPERV0.HW'])\n# sys.exit(-1)\ndirectory = OUTPUT_FOLDER\nif not os.path.exists(directory):\n\tos.makedirs(directory)\n\nfor ftype in filetypes:\n\tif ftype == '':\n\t\tcontinue\n\tprint('processing ftype:',ftype)\n\tflog = preprocess_log(ftype, filetypes[ftype])\n\t# print(flog.keys())\n\tfor cmd in flog:\n\t\tlogs = flog[cmd]\n\t\tif cmd == '':\n\t\t\tcontinue\n\t\tout_file = open(directory+'/'+str(cmd)+'_'+str(ftype), 'w')\n\t\tfor log in logs:\n\t\t\tout_file.write(''.join(log))\n\t\tout_file.close()\n\n# ftype = 'IP_configurations.txt'\n# print(preprocess_log(ftype, filetypes[ftype][:2]))\n# process_onekind()\n# print(cmd_log)\n", "sub_path": "preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 3005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 27, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 56, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "14354025", "text": "from openpyxl import Workbook\nfrom openpyxl import load_workbook\nimport json\n\nfrom Models.Cell import Cell\nfrom 
Models.DataTree import DataTree\nfrom Models.Data import Data\nfrom Functions.FunctionCatalog import FunctionCatalog\nfrom Functions.Catalog import Catalog\nfrom ProcedureProfiles.Interpreter import Interpreter\nfrom Services.Search import Search\n\n\nclass Pattern:\n\n\tfinder = FunctionCatalog()\n\n\tdef __init__(self, file: str):\n\t\tself.procedure_profile = Interpreter(file).interpret_profile()\n\t\tself.output = None\n\t\tself.coordinates = None\n\n\tdef interpret(self):\n\t\tself.output = DataTree()\n\t\t# TODO : Handle parent\n\t\tparent = None\n\t\t# print(\"Profile: \" + str(self.procedure_profile)) # TODO : FOR TEST\n\t\tcount = 0\n\t\tinput_dir = self.procedure_profile.pop(0)\n\t\tself.coordinates = self.procedure_profile.pop(0)\n\t\tfor token in self.procedure_profile:\n\t\t\t# print(\"Token: \" + token)\n\t\t\tis_function = Pattern.finder.get(token)\n\t\t\tif is_function:\n\t\t\t\ttoken = Catalog.catalog[token](parent)\n\t\t\telse:\n\t\t\t\t# TODO : What if NOT a function?\n\t\t\t\t# TODO : Token TO Sheet and Column\n\t\t\t\ttemp = \"\"\n\t\t\t\toutput = list()\n\t\t\t\tindex = 0\n\t\t\t\tfor character in token:\n\t\t\t\t\tif character == \".\":\n\t\t\t\t\t\toutput.insert(index, temp)\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\t\ttemp = \"\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ttemp += character\n\t\t\t\toutput.insert(0, temp)\n\n\t\t\t\t# print(\"Output for interpreter: \" + str(output))\n\n\t\t\t\tfile = output[1]\n\t\t\t\t# print(\"File: \" + file)\n\t\t\t\tsheet = output[2]\n\t\t\t\t# print(\"Sheet: \" + sheet)\n\t\t\t\tcolumn = int(output[0])\n\t\t\t\t# print(\"Col: \" + str(column))\n\n\t\t\t\ttoken = Data(parent, input_dir + \"\\\\\" + file, sheet, column)\n\n\t\t\t# print(\"New Token: \" + str(token))\n\t\t\tself.output.add(token)\n\t\t\tparent = token\n\t\t\t# self.output.draw()\n\t\t\tcount += 1\n\n\t\treturn self.output\n\n\t@staticmethod\n\tdef gen_tree(post_fix_expression):\n\t\t# print(post_fix_expression)\n\t\tdata_tree = DataTree()\n\t\troot = False\n\t\tparent = None\n\t\tfor node in post_fix_expression:\n\t\t\ttoken = None\n\t\t\tif isinstance(node, str):\n\t\t\t\t# print(\"String \" + node)\n\t\t\t\tis_function = Pattern.finder.get(node)\n\t\t\t\tif is_function:\n\t\t\t\t\ttoken = Catalog.catalog[node](parent)\n\t\t\t\t\t# print(\"Function \" + str(token))\n\t\t\t\t\tdata_tree.add(token)\n\t\t\t\t\"\"\"if not root:\n\t\t\t\t\troot = True\n\t\t\t\t\tdata_tree.root = token\n\t\t\t\t\tprint(\"Added \" + str(token) + \" as root\")\"\"\"\n\t\t\t\tparent = token\n\t\t\t\tcontinue\n\t\t\t# print(\"Adding \" + str(node) + \" to tree\")\n\t\t\tdata_value = Search.find(node['file'], node['sheet'], node['column'], node['object'])\n\t\t\tdata_value = data_value[0]\n\t\t\ttoken = Cell(parent, data=data_value, file=node['file'], sheet=node['sheet'],\n\t\t\t column=node['column'], style=None)\n\t\t\t# print(\"Adding token \" + str(token) + \" to tree\")\n\t\t\tdata_tree.add(token)\n\t\t\t# print(\"Node added \" + str(node))\n\t\t\t# print(\"ROOT \" + str(data_tree.root))\n\n\t\t# data_tree.draw()\n\t\t# print(str(data_tree.process()))\n\t\treturn data_tree.process()\n\n\t@staticmethod\n\tdef test_new(path=\"void.xls\"):\n\t\twb = Workbook()\n\t\twb.active[\"A1\"] = 25\n\t\twb.save(path)\n\n\t@staticmethod\n\tdef test_update(path=\"void.xls\"):\n\t\twb = load_workbook(path)\n\t\twb.active[\"A1\"] = 45\n\t\twb.active[\"B1\"] = 45\n\t\twb.save(path)\n\n\t@staticmethod\n\tdef test_json():\n\t\ttemp = json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])\n\t\tprint(temp)\n", "sub_path": 
"application/analytics/Services/Pattern.py", "file_name": "Pattern.py", "file_ext": "py", "file_size_in_byte": 3200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "Functions.FunctionCatalog.FunctionCatalog", "line_number": 16, "usage_type": "call"}, {"api_name": "ProcedureProfiles.Interpreter.Interpreter", "line_number": 19, "usage_type": "call"}, {"api_name": "Models.DataTree.DataTree", "line_number": 24, "usage_type": "call"}, {"api_name": "Functions.Catalog.Catalog.catalog", "line_number": 35, "usage_type": "attribute"}, {"api_name": "Functions.Catalog.Catalog", "line_number": 35, "usage_type": "name"}, {"api_name": "Models.Data.Data", "line_number": 60, "usage_type": "call"}, {"api_name": "Models.DataTree.DataTree", "line_number": 73, "usage_type": "call"}, {"api_name": "Functions.Catalog.Catalog.catalog", "line_number": 82, "usage_type": "attribute"}, {"api_name": "Functions.Catalog.Catalog", "line_number": 82, "usage_type": "name"}, {"api_name": "Services.Search.Search.find", "line_number": 92, "usage_type": "call"}, {"api_name": "Services.Search.Search", "line_number": 92, "usage_type": "name"}, {"api_name": "Models.Cell.Cell", "line_number": 94, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 107, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 113, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "530544912", "text": "import math\n\nimport cv2\nimport numpy as np\nfrom PIL.Image import fromarray\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QFileDialog\n# import vgg\n\nimport classify\n\n\ndef division(img):\n # 分别储存颜色和正反信息的两个Set\n capcolor = img[20:4000, 20:3000] # 拍照的边造成不可控融合,所以同意把边切了\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0)\n gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1)\n gradient = cv2.subtract(gradX, gradY)\n gradient = cv2.convertScaleAbs(gradient)\n kernel = np.ones((5, 5), np.uint8)\n dilated = cv2.dilate(gradient, kernel)\n for i in range(3):\n dilated = cv2.dilate(dilated, kernel)\n (_, thresh) = cv2.threshold(dilated, 60, 255, cv2.THRESH_BINARY)\n kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (80, 80))\n closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel2)\n kernel3 = np.ones((10, 10), np.uint8)\n erosion = cv2.erode(closed, kernel3, iterations=1)\n blur = cv2.medianBlur(erosion, 31)\n dilate = cv2.dilate(blur, kernel3)\n close1 = cv2.morphologyEx(dilate, cv2.MORPH_CLOSE, kernel2)\n capall = close1[20:4000, 20:3000]\n gq = fromarray(np.uint8(capall))\n gq.save(\"capA.jpg\")\n img1 = cv2.imread('capA.jpg')\n img_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n ret, bipix = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n img, contours, hierarchy = cv2.findContours(bipix, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours, capcolor\n\n\ndef judge(contours, capcolor):\n colorSet = []\n pnSet = []\n capSet = []\n all = len(contours)\n for i in range(all):\n cnt = contours[i]\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n (x, y, radius) = np.int0((x, y, radius)) # 圆心和半径取整\n if (radius > 240 and radius < 300):\n x1 = x - radius\n x2 = x + radius\n y1 = y - radius\n y2 = y + radius\n capThis = capcolor[y1:y2, x1:x2]\n name = \"littlecap\" + str(i) + \".jpg\"\n img_RGB = cv2.cvtColor(capThis, cv2.COLOR_BGR2RGB)\n # 分类\n clf = classify.classify(img_RGB) # 
返回0/1\n\n gqi = fromarray(np.uint8(img_RGB))\n gqi.save(name)\n cv2.rectangle(capcolor, (x1, y1), (x2, y2), (238, 104, 123), thickness=7)\n # vgg.pred(name, \"./\")\n rect = cv2.minAreaRect(cnt) # 最小外接矩形\n box = np.int0(cv2.boxPoints(rect)) # 矩形的四个角点取整\n p1 = box[0]\n p2 = box[1]\n p3 = box[2]\n p4 = p2 - p1\n p5 = p3 - p2\n p6 = math.hypot(p4[0], p4[1])\n p7 = math.hypot(p5[0], p5[1])\n colorSet.append(color(img_RGB))\n if abs(p6 - p7) > 100:\n capSet.append(i)\n pnSet.append(\"立\")\n else:\n capSet.append(i)\n if clf == 0:\n pnSet.append(\"正\")\n else:\n pnSet.append(\"反\")\n # 因为目前测试图片问题,如果没有10个完全被识别展示没什么用,可以看命令行里的输出\n # print(colorSet)\n # print(capSet)\n # print(pnSet)\n rec = fromarray(np.uint8(cv2.cvtColor(capcolor, cv2.COLOR_BGR2RGB)))\n rec.save(\"rec.jpg\")\n\n return capSet, pnSet, colorSet\n\n\ndef judge_ResNet(contours, capcolor):\n colorSet = []\n pnSet = []\n capSet = []\n all = len(contours)\n for i in range(all):\n cnt = contours[i]\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n (x, y, radius) = np.int0((x, y, radius)) # 圆心和半径取整\n if (radius > 240 and radius < 300):\n x1 = x - radius\n x2 = x + radius\n y1 = y - radius\n y2 = y + radius\n capThis = capcolor[y1:y2, x1:x2]\n name = \"littlecap\" + str(i) + \".jpg\"\n img_RGB = cv2.cvtColor(capThis, cv2.COLOR_BGR2RGB)\n # 分类\n clf = classify.classify(img_RGB) # 返回0/1\n\n gqi = fromarray(np.uint8(img_RGB))\n gqi.save(name)\n rect = cv2.minAreaRect(cnt) # 最小外接矩形\n box = np.int0(cv2.boxPoints(rect)) # 矩形的四个角点取整\n p1 = box[0]\n p2 = box[1]\n p3 = box[2]\n p4 = p2 - p1\n p5 = p3 - p2\n p6 = math.hypot(p4[0], p4[1])\n p7 = math.hypot(p5[0], p5[1])\n if abs(p6 - p7) > 100:\n capSet.append(i)\n pnSet.append(\"立\")\n else:\n capSet.append(i)\n if clf == 0:\n pnSet.append(\"正\")\n else:\n pnSet.append(\"反\")\n # 因为目前测试图片问题,如果没有10个完全被识别展示没什么用,可以看命令行里的输出\n # print(colorSet)\n # print(capSet)\n # print(pnSet)\n return (capSet, pnSet)\n\n\ndef judge_VGG(contours, capcolor):\n colorSet = []\n pnSet = []\n capSet = []\n all = len(contours)\n for i in range(all):\n cnt = contours[i]\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n (x, y, radius) = np.int0((x, y, radius)) # 圆心和半径取整\n if (radius > 240 and radius < 300):\n x1 = x - radius\n x2 = x + radius\n y1 = y - radius\n y2 = y + radius\n capThis = capcolor[y1:y2, x1:x2]\n name = \"littlecap\" + str(i) + \".jpg\"\n img_RGB = cv2.cvtColor(capThis, cv2.COLOR_BGR2RGB)\n # 分类\n clf = classify.classify(img_RGB) # 返回0/1\n\n gqi = fromarray(np.uint8(img_RGB))\n gqi.save(name)\n rect = cv2.minAreaRect(cnt) # 最小外接矩形\n box = np.int0(cv2.boxPoints(rect)) # 矩形的四个角点取整\n p1 = box[0]\n p2 = box[1]\n p3 = box[2]\n p4 = p2 - p1\n p5 = p3 - p2\n p6 = math.hypot(p4[0], p4[1])\n p7 = math.hypot(p5[0], p5[1])\n if abs(p6 - p7) > 100:\n capSet.append(i)\n pnSet.append(\"立\")\n else:\n capSet.append(i)\n if clf == 0:\n pnSet.append(\"正\")\n else:\n pnSet.append(\"反\")\n # 因为目前测试图片问题,如果没有10个完全被识别展示没什么用,可以看命令行里的输出\n # print(colorSet)\n # print(capSet)\n # print(pnSet)\n return (capSet, pnSet)\n\n\ndef point_color(p):\n if p[0] > 180 and p[1] > 180 and p[2] < 170:\n return 3\n elif p[0] > 180 and p[1] < 170 and p[2] > 180:\n return 4\n elif p[0] < 170 and p[1] > 180 and p[2] > 180:\n return 5\n elif p[0] > 200 and p[1] > 200 and p[2] > 200:\n return 6\n elif p[0] < 50 and p[1] < 50 and p[2] < 50:\n return 7\n elif abs(p[0] - p[1]) < 10 and abs(p[0] - p[2]) < 10:\n return 8\n elif max(p[0], p[1], p[2]) == p[0]:\n return 0\n elif max(p[0], p[1], p[2]) == p[1]:\n return 1\n elif max(p[0], p[1], p[2]) == p[2]:\n 
return 2\n\n\n# 0:R, 1:G, 2:B, 3:Yellow, 4:Purple, 5:Cyan, 6:White, 7:Black, 8:Gray\n\n\ndef prior_color(list):\n r = g = b = y = p = c = w = bl = gr = 0\n for i in range(len(list)):\n # print(list[i])\n if list[i] == 0:\n r += 1\n elif list[i] == 1:\n g += 1\n elif list[i] == 2:\n b += 1\n elif list[i] == 3:\n y += 1\n elif list[i] == 4:\n p += 1\n elif list[i] == 5:\n c += 1\n elif list[i] == 6:\n w += 1\n elif list[i] == 7:\n bl += 1\n elif list[i] == 8:\n gr += 1\n sum_pc = (r + g + b + y + p + c + w + bl + gr) * 0.6\n if max(r, g, b, y, p, c, w, bl, gr) == r and r > sum_pc:\n return 0\n elif max(r, g, b, y, p, c, w, bl, gr) == g and g > sum_pc:\n return 1\n elif max(r, g, b, y, p, c, w, bl, gr) == b and b > sum_pc:\n return 2\n elif max(r, g, b, y, p, c, w, bl, gr) == y and y > sum_pc:\n return 3\n elif max(r, g, b, y, p, c, w, bl, gr) == p and p > sum_pc:\n return 4\n elif max(r, g, b, y, p, c, w, bl, gr) == c and c > sum_pc:\n return 5\n elif max(r, g, b, y, p, c, w, bl, gr) == w and w > sum_pc:\n return 6\n elif max(r, g, b, y, p, c, w, bl, gr) == bl and bl > sum_pc:\n return 7\n elif max(r, g, b, y, p, c, w, bl, gr) == gr and gr > sum_pc:\n return 8\n else:\n return -1\n\n\ndef color(img_RGB):\n print(img_RGB)\n print(len(img_RGB))\n l = len(img_RGB)\n o_x = o_y = int(l / 2)\n cycle_colors = []\n ok = True\n count = 0\n for r in range(70, int(l / 2)):\n count += 1\n point_colors = []\n for x in range(r):\n y = int(math.sqrt(r * r - x * x))\n point_colors.append(point_color(img_RGB[o_x + x][o_y + y]))\n point_colors.append(point_color(img_RGB[o_x + x][o_y - y]))\n point_colors.append(point_color(img_RGB[o_x - x][o_y + y]))\n point_colors.append(point_color(img_RGB[o_x - x][o_y - y]))\n cycle_colors.append(prior_color(point_colors))\n ok = True\n if count > 5:\n for i in range(1, 4):\n if cycle_colors[len(cycle_colors) - 1 - i] != cycle_colors[len(cycle_colors) - 1] or cycle_colors[\n len(cycle_colors) - 1 - i] == -1:\n ok = False\n if ok:\n return cycle_colors[len(cycle_colors) - 1]\n return 0\n", "sub_path": "代码+图片+机器学习训练代码/cvfuncs.py", "file_name": "cvfuncs.py", "file_ext": "py", "file_size_in_byte": 9709, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.cvtColor", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.CV_32F", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.CV_32F", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.subtract", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.convertScaleAbs", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.MORPH_ELLIPSE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 27, "usage_type": "call"}, {"api_name": 
"cv2.MORPH_CLOSE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.erode", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.minEnclosingCircle", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 59, "usage_type": "attribute"}, {"api_name": "classify.classify", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.minAreaRect", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 68, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 74, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cv2.minEnclosingCircle", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 112, "usage_type": "attribute"}, {"api_name": "classify.classify", "line_number": 114, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.minAreaRect", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 119, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 125, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 126, "usage_type": 
"call"}, {"api_name": "cv2.minEnclosingCircle", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 159, "usage_type": "attribute"}, {"api_name": "classify.classify", "line_number": 161, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.minAreaRect", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 166, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 166, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 172, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 173, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "384028399", "text": "import json\nimport os\nfrom datetime import date\nfrom enum import Enum\nfrom typing import List\n\nimport requests\nfrom apscheduler.job import Job\n\nimport apps.configs.lector_variables as var\nimport apps.utils.email_util as email_util\nimport apps.utils.git_util as git_util\nimport apps.utils.scheduler_util as scheduler_util\nfrom apps.configs.loggers import get_logger\nfrom apps.configs.variables import Var\nfrom apps.models.emails import EmailModelo\nfrom apps.models.errores import AppException\nfrom apps.models.taiga import EmailTaiga, ReportesConfig\nfrom apps.services.taiga.taiga_reportes_config_service import \\\n obtener_json_config\nfrom apps.services.taiga.taiga_service import generar_reporte_json\n\n_sched = scheduler_util.crear_scheduler()\n\n_EMAIL_ENVIADOR = var.get(Var.EMAIL_ENVIADOR)\n_EMAIL_PASS = var.get(Var.EMAIL_PASS)\n_GENERADOR_PDF_HOST = var.get(Var.GENERADOR_PDF_HOST)\n\n\nclass Errores(Enum):\n SERVICIO_GENERAR_REPORTE = 'SERVICIO_GENERAR_REPORTE'\n\n\ndef _funcion():\n print('funcion vacia')\n\n\ndef generar_todos_los_reportes_manualmente():\n configs = obtener_json_config()\n\n for config in configs:\n generar_reporte(config)\n\n\ndef iniciar_proceso_automatico():\n '''\n Obtine el json guardado con la configuracion autormatica de los reportes\n '''\n configs = obtener_json_config()\n get_logger().info(\n f'Iniciando proceso automatico con la siguiente config: {configs}')\n\n for config in obtener_json_config():\n job = scheduler_util.agregar_job(_sched, _funcion, config.cron,\n config.nombre)\n job.args = [config]\n job.func = generar_reporte\n\n scheduler_util.inciar_scheduler(_sched)\n\n\ndef parar_proceso_automatico():\n '''\n Obtine el json guardado con la configuracion autormatica de los reportes\n '''\n scheduler_util.parar_scheduler(_sched)\n\n\ndef actualizar_proceso_automatico():\n '''\n Obtine el json guardado con la configuracion autormatica de los reportes\n '''\n parar_proceso_automatico()\n iniciar_proceso_automatico()\n\n\ndef generar_reporte(config: ReportesConfig):\n '''\n Genera el reporte pdf y envia el email al finalizar\n '''\n reporte_json = generar_reporte_json(config)\n\n url_completa = _GENERADOR_PDF_HOST + config.url_generar_reporte\n get_logger().info(f'Ejecutando REST con url -> {url_completa}')\n\n try:\n headers = {'content-type': 'application/json'}\n resultado = requests.post(url_completa,\n data=json.dumps(reporte_json),\n headers=headers)\n except Exception as e:\n mensaje = 'Error desconocido en el servicio de para generar el reporte'\n 
get_logger().error(mensaje, exc_info=True)\n get_logger().exception(e)\n raise AppException(Errores.SERVICIO_GENERAR_REPORTE, mensaje)\n\n if resultado.status_code != 200:\n mensaje = f'Error servicio generar reporte -> URL: {url_completa}, STATUS: {resultado.status_code}, BODY: {resultado.text}'\n raise AppException(Errores.SERVICIO_GENERAR_REPORTE, mensaje)\n\n contenido_reporte = resultado.content\n\n _enviar_email(config, contenido_reporte)\n _guardar_reporte_en_git(config, contenido_reporte)\n\n\ndef _enviar_email(config: ReportesConfig, contenido_reporte: bytes):\n '''\n Sends the email to finish the process\n '''\n encabezado = f'Entrega de reporte mensual de {config.nombre} a la fecha {date.today()}'\n cuerpo = f'Muy buenos dias, mediante la presente les hago entrega del reporte mensual, saludos cordiales.'\n\n email_a_enviar = EmailModelo(de=_EMAIL_ENVIADOR,\n contrasenia=_EMAIL_PASS,\n para=config.email_taiga.destinatarios,\n encabezado=encabezado,\n cuerpo=cuerpo,\n copia=config.email_taiga.copiados,\n adjuntos=[(\n _nombre_reporte_final(config),\n contenido_reporte,\n )])\n\n email_util.enviar_email(email_a_enviar)\n\n\ndef _nombre_reporte_final(config: ReportesConfig) -> str:\n '''\n Returns the report file name\n '''\n return f'{date.today()} - {config.nombre}.md'\n\n\ndef _guardar_reporte_en_git(config: ReportesConfig, contenido_reporte: bytes):\n '''\n Saves the report in git by cloning the project, creating the report file,\n committing and pushing\n '''\n repo = git_util.clonar_repo_git(config.git)\n get_logger().info(\n f'Repo clonado en la ruta: {repo.working_dir} con la url: {config.git.url_repo}')\n\n nombre_reporte = _nombre_reporte_final(config)\n ruta_reporte = os.path.join(repo.working_dir, nombre_reporte)\n\n with open(ruta_reporte, 'wb') as reporte_archivo:\n reporte_archivo.write(contenido_reporte)\n\n mensaje_commit = f'Se sube reporte de forma automatica con nombre: {nombre_reporte}'\n git_util.pushear_a_master(repo, mensaje_commit)\n\n git_util._borrar_carpeta_si_existe(repo.working_dir)\n", "sub_path": "src/apps/services/taiga/taiga_scheduler_service.py", "file_name": "taiga_scheduler_service.py", "file_ext": "py", "file_size_in_byte": 5139, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "apps.utils.scheduler_util.crear_scheduler", "line_number": 23, "usage_type": "call"}, {"api_name": "apps.utils.scheduler_util", "line_number": 23, "usage_type": "name"}, {"api_name": "apps.configs.lector_variables.get", "line_number": 25, "usage_type": "call"}, {"api_name": "apps.configs.lector_variables", "line_number": 25, "usage_type": "name"}, {"api_name": "apps.configs.variables.Var.EMAIL_ENVIADOR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "apps.configs.variables.Var", "line_number": 25, "usage_type": "name"}, {"api_name": "apps.configs.lector_variables.get", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.configs.lector_variables", "line_number": 26, "usage_type": "name"}, {"api_name": "apps.configs.variables.Var.EMAIL_PASS", "line_number": 26, "usage_type": "attribute"}, {"api_name": "apps.configs.variables.Var", "line_number": 26, "usage_type": "name"}, {"api_name": "apps.configs.lector_variables.get", "line_number": 27, "usage_type": "call"}, {"api_name": "apps.configs.lector_variables", "line_number": 27, "usage_type": "name"}, {"api_name": "apps.configs.variables.Var.GENERADOR_PDF_HOST", "line_number": 27, "usage_type": "attribute"}, 
{"api_name": "apps.configs.variables.Var", "line_number": 27, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 30, "usage_type": "name"}, {"api_name": "apps.services.taiga.taiga_reportes_config_service.obtener_json_config", "line_number": 39, "usage_type": "call"}, {"api_name": "apps.services.taiga.taiga_reportes_config_service.obtener_json_config", "line_number": 49, "usage_type": "call"}, {"api_name": "apps.configs.loggers.get_logger", "line_number": 50, "usage_type": "call"}, {"api_name": "apps.services.taiga.taiga_reportes_config_service.obtener_json_config", "line_number": 53, "usage_type": "call"}, {"api_name": "apps.utils.scheduler_util.agregar_job", "line_number": 54, "usage_type": "call"}, {"api_name": "apps.utils.scheduler_util", "line_number": 54, "usage_type": "name"}, {"api_name": "apps.utils.scheduler_util.inciar_scheduler", "line_number": 59, "usage_type": "call"}, {"api_name": "apps.utils.scheduler_util", "line_number": 59, "usage_type": "name"}, {"api_name": "apps.utils.scheduler_util.parar_scheduler", "line_number": 66, "usage_type": "call"}, {"api_name": "apps.utils.scheduler_util", "line_number": 66, "usage_type": "name"}, {"api_name": "apps.models.taiga.ReportesConfig", "line_number": 77, "usage_type": "name"}, {"api_name": "apps.services.taiga.taiga_service.generar_reporte_json", "line_number": 81, "usage_type": "call"}, {"api_name": "apps.configs.loggers.get_logger", "line_number": 84, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 88, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "apps.configs.loggers.get_logger", "line_number": 93, "usage_type": "call"}, {"api_name": "apps.configs.loggers.get_logger", "line_number": 94, "usage_type": "call"}, {"api_name": "apps.models.errores.AppException", "line_number": 95, "usage_type": "call"}, {"api_name": "apps.models.errores.AppException", "line_number": 99, "usage_type": "call"}, {"api_name": "apps.models.taiga.ReportesConfig", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 111, "usage_type": "name"}, {"api_name": "apps.models.emails.EmailModelo", "line_number": 114, "usage_type": "call"}, {"api_name": "apps.utils.email_util.enviar_email", "line_number": 125, "usage_type": "call"}, {"api_name": "apps.utils.email_util", "line_number": 125, "usage_type": "name"}, {"api_name": "apps.models.taiga.ReportesConfig", "line_number": 128, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 132, "usage_type": "name"}, {"api_name": "apps.models.taiga.ReportesConfig", "line_number": 135, "usage_type": "name"}, {"api_name": "apps.utils.git_util.clonar_repo_git", "line_number": 140, "usage_type": "call"}, {"api_name": "apps.utils.git_util", "line_number": 140, "usage_type": "name"}, {"api_name": "apps.configs.loggers.get_logger", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "apps.utils.git_util.pushear_a_master", "line_number": 151, "usage_type": "call"}, {"api_name": "apps.utils.git_util", "line_number": 151, "usage_type": "name"}, {"api_name": "apps.utils.git_util._borrar_carpeta_si_existe", "line_number": 153, "usage_type": "call"}, {"api_name": "apps.utils.git_util", 
"line_number": 153, "usage_type": "name"}]} +{"seq_id": "602614100", "text": "from xml.dom.minidom import parseString, Element\n\n\nfstring = \"\"\"\n\n \n fdcr\n .ods\n \n \n \n fdcop\n 15\n \n\"\"\"\n\n\nclass myXml():\n DEBUG = True\n\n def __init__(self, xml_string):\n self.dictionnaire = {}\n self.node_tagname = \"\"\n self.node_tagvalue = \"\"\n self.chemin = []\n self.integrate(xml_string)\n\n def integrate(self, xml_string):\n self.root = None\n try:\n parse = parseString(xml_string)\n if parse:\n self.get_list(parse)\n\n except Exception as e:\n print(\"Error:\", e)\n\n def __str__(self):\n res = \"{\\n\"\n for cle, valeur in self.dictionnaire.items():\n res += f\" '{cle}': '{valeur}',\\n\"\n res = res + \"}\"\n return res\n\n def get_attributes(self, node):\n if not node.hasAttributes(): \n return \"\"\n\n res = \" \"\n for attr, valeur in node.attributes.items():\n res += f\"{attr}='{valeur}' \"\n return res.rstrip()\n\n def get_list(self, current, level=0):\n res = False\n for node in current.childNodes:\n if node.nodeType == Element.TEXT_NODE:\n if self.DEBUG: \n print(node.data.strip(), end=\"\")\n self.node_tagvalue = node.data.strip()\n\n elif node.nodeType == Element.ELEMENT_NODE:\n res = True\n if self.DEBUG: \n print(\"\\n\" + \" \"*level + f\"<{node.nodeName}{self.get_attributes(node)}>\", end=\"\")\n\n self.node_tagname = node.nodeName\n self.node_tagvalue = \"\"\n self.chemin.append(self.node_tagname)\n\n if self.get_list(node, level+1):\n if self.DEBUG: \n print(\"\\n\" + \" \"*level + f\"\", end=\"\")\n else:\n if self.DEBUG: \n if self.node_tagvalue == \"\":\n print(\"\", end=\"\")\n else:\n print(f\"\", end=\"\")\n if self.node_tagvalue:\n self.dictionnaire[\"/\" + \"/\".join(self.chemin)] = self.node_tagvalue\n\n self.chemin.pop()\n\n return res\n\n\nmon_xml = myXml(fstring)\nprint(f\"\\n{mon_xml}\")\n", "sub_path": "cxml2.py", "file_name": "cxml2.py", "file_ext": "py", "file_size_in_byte": 2499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "xml.dom.minidom.parseString", "line_number": 31, "usage_type": "call"}, {"api_name": "xml.dom.minidom.Element.TEXT_NODE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom.Element", "line_number": 57, "usage_type": "name"}, {"api_name": "xml.dom.minidom.Element.ELEMENT_NODE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom.Element", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "169146267", "text": "import json\nfrom typing import List, Protocol\n\nimport redis\n\nfrom core.config import RedisSettings\nfrom domain.enums.similarity_enums import SimilarityModelType\n\n\nREDIS_SETTINGS = RedisSettings()\nREDIS_CLIENT = redis.Redis(host=REDIS_SETTINGS.redis_host, port=REDIS_SETTINGS.redis_port)\n\n\nclass AbstractKvsRepository(Protocol):\n \n def get_similar_movie_id_list(\n self, \n movie_id: int, \n model_type: SimilarityModelType\n ) -> List[int]:\n ...\n\n\nclass RedisRepository:\n \n def get_similar_movie_id_list(\n self, \n movie_id: int, \n model_type: SimilarityModelType\n ) -> List[int]:\n \n # キー名を作成\n key = _make_sim_key(movie_id=movie_id, model_type=model_type)\n\n # 類似映画IDを取得\n response = REDIS_CLIENT.get(key)\n if not response:\n return []\n \n return json.loads(response.decode(\"utf-8\"))\n\n\nasync def get_kvs_repository() -> AbstractKvsRepository:\n return RedisRepository()\n\n\ndef _make_sim_key(movie_id: int, model_type: SimilarityModelType) -> str:\n return 
f\"{movie_id}_{model_type.value}\"\n", "sub_path": "api/app/infra/repository/redis_repository.py", "file_name": "redis_repository.py", "file_ext": "py", "file_size_in_byte": 1143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "core.config.RedisSettings", "line_number": 10, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 11, "usage_type": "call"}, {"api_name": "typing.Protocol", "line_number": 14, "usage_type": "name"}, {"api_name": "domain.enums.similarity_enums.SimilarityModelType", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "domain.enums.similarity_enums.SimilarityModelType", "line_number": 29, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 30, "usage_type": "name"}, {"api_name": "domain.enums.similarity_enums.SimilarityModelType", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "49259577", "text": "# -*- coding: UTF-8 -*-\nimport json\nimport os,sys\n\nclass configure():\n host = \"\"\n port = \"\"\n session = \"\"\n authKey = \"\"\n qq = \"\"\n receiveTime = 5\n done = False\n jiChou = False\n def __init__(self):\n if configure.done == True:\n return\n\n try:\n data = open(\"configure.json\",\"r+\")\n json_str = data.read()\n print(json_str)\n info_json = json.loads(json_str)\n configure.host = info_json[\"host\"]\n configure.port = info_json[\"port\"]\n configure.session = info_json[\"session\"]\n configure.authKey = info_json[\"authKey\"]\n configure.qq = info_json[\"qq\"]\n configure.receiveTime = info_json[\"receiveTime\"]\n configure.done = True\n configure.jiChou = info_json[\"jiChou\"]\n except:\n print(\"configure_json错误\")\n \n\n def upDataSession(self,session):\n configure.session = session", "sub_path": "QQ_robot/Configure_Info.py", "file_name": "Configure_Info.py", "file_ext": "py", "file_size_in_byte": 976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.loads", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "374496090", "text": "from __future__ import division\nfrom __future__ import print_function\nfrom operator import itemgetter\nfrom itertools import combinations\nimport time\nimport os\nimport logging\nimport sys\n\nsys.path.insert(0, './')\n\nimport tensorflow as tf\nimport numpy as np\nimport networkx as nx\nimport scipy.sparse as sp\nfrom sklearn import metrics\n\nfrom decagon.deep.optimizer import DecagonOptimizer\nfrom decagon.deep.model import DecagonModel\nfrom decagon.deep.minibatch import EdgeMinibatchIterator\nfrom decagon.utility import rank_metrics, preprocessing\nfrom polypharmacy.utility import *\nfrom collections import Counter\n\nfrom decagon.utility.visualization import WriterTensorboardX\nimport datetime\n\n# Train on CPU (hide GPU) due to memory constraints\n# os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n\n# Train on GPU\nos.environ[\"CUDA_DEVICE_ORDER\"] = 'PCI_BUS_ID'\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\nnp.random.seed(0)\n\n\n###########################################################\n#\n# Functions\n#\n###########################################################\n\n\ndef get_accuracy_scores(edges_pos, edges_neg, edge_type):\n feed_dict.update({placeholders['dropout']: 0})\n 
feed_dict.update({placeholders['batch_edge_type_idx']: minibatch.edge_type2idx[edge_type]})\n feed_dict.update({placeholders['batch_row_edge_type']: edge_type[0]})\n feed_dict.update({placeholders['batch_col_edge_type']: edge_type[1]})\n rec = sess.run(opt.predictions, feed_dict=feed_dict)\n\n def sigmoid(x):\n return 1. / (1 + np.exp(-x))\n\n # Predict on test set of edges\n preds = []\n actual = []\n predicted = []\n edge_ind = 0\n for u, v in edges_pos[edge_type[:2]][edge_type[2]]:\n score = sigmoid(rec[u, v])\n preds.append(score)\n assert adj_mats_orig[edge_type[:2]][edge_type[2]][u, v] == 1, 'Problem 1'\n\n actual.append(edge_ind)\n predicted.append((score, edge_ind))\n edge_ind += 1\n\n preds_neg = []\n for u, v in edges_neg[edge_type[:2]][edge_type[2]]:\n score = sigmoid(rec[u, v])\n preds_neg.append(score)\n assert adj_mats_orig[edge_type[:2]][edge_type[2]][u, v] == 0, 'Problem 0'\n\n predicted.append((score, edge_ind))\n edge_ind += 1\n\n preds_all = np.hstack([preds, preds_neg])\n preds_all = np.nan_to_num(preds_all)\n labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])\n predicted = list(zip(*sorted(predicted, reverse=True, key=itemgetter(0))))[1]\n\n roc_sc = metrics.roc_auc_score(labels_all, preds_all)\n aupr_sc = metrics.average_precision_score(labels_all, preds_all)\n apk_sc = rank_metrics.apk(actual, predicted, k=50)\n\n return roc_sc, aupr_sc, apk_sc\n\n\ndef construct_placeholders(edge_types):\n placeholders = {\n 'batch': tf.placeholder(tf.int32, name='batch'),\n 'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'),\n 'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'),\n 'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'),\n 'degrees': tf.placeholder(tf.int32),\n 'dropout': tf.placeholder_with_default(0., shape=()),\n }\n placeholders.update({\n 'adj_mats_%d,%d,%d' % (i, j, k): tf.sparse_placeholder(tf.float32)\n for i, j in edge_types for k in range(edge_types[i, j])})\n placeholders.update({\n 'feat_%d' % i: tf.sparse_placeholder(tf.float32)\n for i, _ in edge_types})\n return placeholders\n\n\n###########################################################\n#\n# Load and preprocess data (This is a dummy toy example!)\n#\n###########################################################\n\n####\n# The following code uses artificially generated and very small networks.\n# Expect less than excellent performance as these random networks do not have any interesting structure.\n# The purpose of main.py is to show how to use the code!\n#\n# All preprocessed datasets used in the drug combination study are at: http://snap.stanford.edu/decagon:\n# (1) Download datasets from http://snap.stanford.edu/decagon to your local machine.\n# (2) Replace dummy toy datasets used here with the actual datasets you just downloaded.\n# (3) Train & test the model.\n####\n\ncombo2stitch, combo2se, se2name = load_combo_se(fname='./data/csv/bio-decagon-combo.csv')\ngene_net, gene_idx_dict = load_ppi('./data/csv/bio-decagon-ppi.csv')\nstitch2proteins = load_targets(fname='./data/csv/bio-decagon-targets.csv')\nstitch2se, se2name_mono = load_mono_se('./data/csv/bio-decagon-mono.csv')\n\nall_drug = []\nfor drug_pair in combo2stitch.values():\n all_drug += drug_pair\n\n# look-up table for drugs\ndrug_id = set(all_drug)\nprint('Drug number = %d' % len(drug_id))\ndrug_idx_dict = {item: idx for idx, item in enumerate(drug_id)}\n\n\n# we only focus on most common 964 side effect\ndef 
get_se_counter(se_map):\n side_effects = []\n for drug in se_map:\n side_effects += list(set(se_map[drug]))\n return Counter(side_effects)\n\n\ncombo_counter = get_se_counter(combo2se)\n\ncommon_se_combo_id = []\ncommon_se_combo_id_counts = []\ncommon_se_combo_names = []\nfor se, count in combo_counter.most_common(964):\n common_se_combo_id += [se]\n common_se_combo_id_counts += [count]\n common_se_combo_names += [se2name[se]]\n\n# look-up table for target combo side effect\nse_id = common_se_combo_id\nse_idx_dict = {item: idx for idx, item in enumerate(se_id)}\n\nval_test_size = 0.1\nn_genes = len(gene_idx_dict)\nn_drugs = len(drug_id)\nn_drugdrug_rel_types = len(se_id)\n\ngene_adj = nx.adjacency_matrix(gene_net)\ngene_degrees = np.array(gene_adj.sum(axis=0)).squeeze()\n\ngene_row = []\ndrug_col = []\n\nfor drug_selected, target_gene in stitch2proteins.items():\n drug_selected_idx = drug_idx_dict[drug_selected]\n\n for gene in target_gene:\n try:\n target_gene_idx = gene_idx_dict[gene]\n gene_row += [target_gene_idx]\n drug_col += [drug_selected_idx]\n except:\n # some target proteins lie outside ppi graph\n pass\n\nrow = np.array(gene_row)\ncol = np.array(drug_col)\ndata = np.ones_like(row)\ngene_drug_adj = sp.csr_matrix((data, (row, col)), shape=(len(gene_idx_dict), len(drug_id)))\ndrug_gene_adj = gene_drug_adj.transpose(copy=True)\n\ndrug_drug_adj_list = np.zeros([n_drugdrug_rel_types, n_drugs, n_drugs])\nfor drug_pair, se_list in combo2se.items():\n drug_1, drug_2 = combo2stitch[drug_pair]\n drug_1_id, drug_2_id = drug_idx_dict[drug_1], drug_idx_dict[drug_2]\n\n for se in se_list:\n if se in se_idx_dict:\n se_idx = se_idx_dict[se]\n drug_drug_adj_list[se_idx][drug_1_id, drug_2_id] = 1\n drug_drug_adj_list[se_idx][drug_2_id, drug_1_id] = 1\n\ndrug_drug_adj_list = [sp.csr_matrix(mat) for mat in drug_drug_adj_list]\ndrug_degrees_list = [np.array(drug_adj.sum(axis=0)).squeeze() for drug_adj in drug_drug_adj_list]\n\n# data representation\nadj_mats_orig = {\n (0, 0): [gene_adj],\n (0, 1): [gene_drug_adj],\n (1, 0): [drug_gene_adj],\n (1, 1): drug_drug_adj_list,\n}\ndegrees = {\n 0: [gene_degrees],\n 1: drug_degrees_list,\n}\n\n# featureless (genes)\ngene_feat = sp.identity(n_genes)\ngene_nonzero_feat, gene_num_feat = gene_feat.shape\ngene_feat = preprocessing.sparse_to_tuple(gene_feat.tocoo())\n\n# features (drugs): side effects of individual drugs were used as additional features for drug nodes.\nmono_se_list = []\nfor mono_se in stitch2se.values():\n mono_se_list.append(list(mono_se))\n\nmono_se_list = np.concatenate(mono_se_list)\n\nmono_se_id = np.array(list(set(mono_se_list)))\nmono_se_idx_dict = {item: idx for idx, item in enumerate(mono_se_id)}\n\ndrug_feature_row = []\ndrug_feature_col = []\n\nfor drug_selected, mono_se_list in stitch2se.items():\n drug_selected_idx = drug_idx_dict[drug_selected]\n\n for mono_se in mono_se_list:\n mono_se_idx = mono_se_idx_dict[mono_se]\n drug_feature_row += [drug_selected_idx]\n drug_feature_col += [mono_se_idx]\n\nrow = np.array(drug_feature_row)\ncol = np.array(drug_feature_col)\ndata = np.ones_like(row)\ndrug_feat = sp.csr_matrix((data, (row, col)), shape=(n_drugs, len(mono_se_id)))\n\ndrug_nonzero_feat, drug_num_feat = drug_feat.shape\ndrug_feat = preprocessing.sparse_to_tuple(drug_feat.tocoo())\n\n# data representation\nnum_feat = { # input dim\n 0: gene_num_feat,\n 1: drug_num_feat,\n}\nnonzero_feat = {\n 0: gene_nonzero_feat,\n 1: drug_nonzero_feat,\n}\nfeat = {\n 0: gene_feat,\n 1: drug_feat,\n}\n\nedge_type2dim = {k: [adj.shape for adj in 
adjs] for k, adjs in adj_mats_orig.items()}\nedge_type2decoder = {\n (0, 0): 'bilinear',\n (0, 1): 'bilinear',\n (1, 0): 'bilinear',\n (1, 1): 'dedicom', # To capture the polypharmacy combinatorics\n}\n\nedge_types = {k: len(v) for k, v in adj_mats_orig.items()}\nnum_edge_types = sum(edge_types.values())\nprint(\"Edge types:\", \"%d\" % num_edge_types)\n\n###########################################################\n#\n# Settings and placeholders\n#\n###########################################################\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_integer('neg_sample_size', 1, 'Negative sample size.')\nflags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')\nflags.DEFINE_integer('epochs', 100, 'Number of epochs to train.')\nflags.DEFINE_integer('hidden1', 64, 'Number of units in hidden layer 1.')\nflags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')\nflags.DEFINE_float('weight_decay', 0, 'Weight for L2 loss on embedding matrix.')\nflags.DEFINE_float('dropout', 0.1, 'Dropout rate (1 - keep probability).')\nflags.DEFINE_float('max_margin', 0.1, 'Max margin parameter in hinge loss')\nflags.DEFINE_integer('batch_size', 512, 'minibatch size.')\nflags.DEFINE_boolean('bias', True, 'Bias term.')\n# Important -- Do not evaluate/print validation performance every iteration as it can take\n# substantial amount of time\nPRINT_PROGRESS_EVERY = 500\n\nprint(\"Defining placeholders\")\nplaceholders = construct_placeholders(edge_types)\n\n###########################################################\n#\n# Create minibatch iterator, model and optimizer\n#\n###########################################################\n\nprint(\"Create minibatch iterator\")\nminibatch = EdgeMinibatchIterator(\n adj_mats=adj_mats_orig,\n feat=feat,\n edge_types=edge_types,\n batch_size=FLAGS.batch_size,\n val_test_size=val_test_size\n)\n\nprint(\"Create model\")\nmodel = DecagonModel(\n placeholders=placeholders,\n num_feat=num_feat,\n nonzero_feat=nonzero_feat,\n edge_types=edge_types,\n decoders=edge_type2decoder,\n)\n\nprint(\"Create optimizer\")\nwith tf.name_scope('optimizer'):\n opt = DecagonOptimizer(\n embeddings=model.embeddings,\n latent_inters=model.latent_inters,\n latent_varies=model.latent_varies,\n degrees=degrees,\n edge_types=edge_types,\n edge_type2dim=edge_type2dim,\n placeholders=placeholders,\n batch_size=FLAGS.batch_size,\n margin=FLAGS.max_margin\n )\n\nprint(\"Initialize session\")\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nfeed_dict = {}\n\n# setup visualization writer instance\n\nstart_time = datetime.datetime.now().strftime('%m%d_%H%M%S')\nwriter_dir = os.path.join('./experiment/log/', start_time)\n\nif not os.path.exists(writer_dir):\n os.makedirs(writer_dir)\n\nwriter = WriterTensorboardX(writer_dir, logging.getLogger(start_time), enable=True)\n\n\n###########################################################\n#\n# epoch evaluation\n#\n###########################################################\n\ndef epoch_eval(minibatch, epoch, mode='val'):\n # test #\n test_list = [1, 2, 3, 4, 5]\n writer.add_scalars('ave_test', np.mean(test_list))\n writer.add_histogram('distribution_test', np.array(test_list))\n\n side_effect_roc_score = []\n side_effect_auprc_score = []\n side_effect_apk_score = []\n for et in range(num_edge_types):\n if mode == 'val':\n roc_score, auprc_score, apk_score = get_accuracy_scores(\n minibatch.val_edges, minibatch.val_edges_false, minibatch.idx2edge_type[et])\n if mode == 'test':\n roc_score, auprc_score, 
apk_score = get_accuracy_scores(\n minibatch.test_edges, minibatch.test_edges_false, minibatch.idx2edge_type[et])\n # print(\"Edge type=\", \"[%02d, %02d, %02d]\" % minibatch.idx2edge_type[et])\n # print(\"Edge type:\", \"%04d\" % et, \"Test AUROC score\", \"{:.5f}\".format(roc_score))\n # print(\"Edge type:\", \"%04d\" % et, \"Test AUPRC score\", \"{:.5f}\".format(auprc_score))\n # print(\"Edge type:\", \"%04d\" % et, \"Test AP@k score\", \"{:.5f}\".format(apk_score))\n\n writer.set_step(epoch, mode)\n if et <= 2:\n writer.add_scalars('%04d_roc_score' % (et), roc_score)\n writer.add_scalars('%04d_auprc_score' % (et), auprc_score)\n writer.add_scalars('%04d_apk_score' % (et), apk_score)\n else:\n side_effect_roc_score.append(roc_score)\n side_effect_auprc_score.append(auprc_score)\n side_effect_apk_score.append(apk_score)\n\n writer.add_scalars('ave_side_effect_auc_score', np.mean(side_effect_roc_score))\n writer.add_scalars('ave_side_effect_auprc_score', np.mean(side_effect_auprc_score))\n writer.add_scalars('ave_side_effect_apk_score', np.mean(side_effect_apk_score))\n\n writer.add_histogram('distribution_side_effect_auc_score', np.array(side_effect_roc_score))\n writer.add_histogram('distribution_side_effect_auprc_score', np.array(side_effect_auprc_score))\n writer.add_histogram('distribution_side_effect_apk_score', np.array(side_effect_apk_score))\n\n\n###########################################################\n#\n# Train model\n#\n###########################################################\n\nprint(\"Train model\")\nfor epoch in range(FLAGS.epochs):\n\n minibatch.shuffle()\n\n itr = 0\n while not minibatch.end():\n # Construct feed dictionary\n feed_dict = minibatch.next_minibatch_feed_dict(placeholders=placeholders)\n feed_dict = minibatch.update_feed_dict(\n feed_dict=feed_dict,\n dropout=FLAGS.dropout,\n placeholders=placeholders)\n\n t = time.time()\n\n # Training step: run single weight update\n outs = sess.run([opt.opt_op, opt.cost, opt.batch_edge_type_idx], feed_dict=feed_dict)\n train_cost = outs[1]\n batch_edge_type = outs[2]\n\n if itr % PRINT_PROGRESS_EVERY == 0:\n val_auc, val_auprc, val_apk = get_accuracy_scores(\n minibatch.val_edges, minibatch.val_edges_false,\n minibatch.idx2edge_type[minibatch.current_edge_type_idx])\n\n print(\"Epoch:\", \"%04d\" % (epoch + 1), \"Iter:\", \"%04d\" % (itr + 1), \"Edge:\", \"%04d\" % batch_edge_type,\n \"train_loss=\", \"{:.5f}\".format(train_cost),\n \"val_roc=\", \"{:.5f}\".format(val_auc), \"val_auprc=\", \"{:.5f}\".format(val_auprc),\n \"val_apk=\", \"{:.5f}\".format(val_apk), \"time=\", \"{:.5f}\".format(time.time() - t))\n\n if itr == 0: # test model for each epoch\n print(\"test model !!!\")\n epoch_eval(minibatch, epoch, mode='test')\n\n itr += 1\n\nprint(\"Optimization finished!\")\n", "sub_path": "main_real.py", "file_name": "main_real.py", "file_ext": "py", "file_size_in_byte": 15236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.path.insert", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"numpy.exp", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 85, "usage_type": "name"}, {"api_name": "sklearn.metrics.average_precision_score", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 86, "usage_type": "name"}, {"api_name": "decagon.utility.rank_metrics.apk", "line_number": 87, "usage_type": "call"}, {"api_name": "decagon.utility.rank_metrics", "line_number": 87, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder_with_default", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.sparse_placeholder", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.sparse_placeholder", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 105, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 147, "usage_type": "call"}, {"api_name": "networkx.adjacency_matrix", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 189, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 190, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 190, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 193, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 204, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 204, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}, {"api_name": "scipy.sparse.identity", "line_number": 220, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 220, "usage_type": "name"}, {"api_name": "decagon.utility.preprocessing.sparse_to_tuple", "line_number": 222, "usage_type": "call"}, {"api_name": "decagon.utility.preprocessing", "line_number": 222, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 229, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 247, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 248, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 248, "usage_type": "name"}, {"api_name": "decagon.utility.preprocessing.sparse_to_tuple", "line_number": 251, "usage_type": "call"}, {"api_name": "decagon.utility.preprocessing", "line_number": 251, "usage_type": "name"}, {"api_name": "tensorflow.app", "line_number": 285, "usage_type": "attribute"}, {"api_name": "decagon.deep.minibatch.EdgeMinibatchIterator", "line_number": 311, "usage_type": "call"}, {"api_name": "decagon.deep.model.DecagonModel", "line_number": 320, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 329, "usage_type": "call"}, {"api_name": "decagon.deep.optimizer.DecagonOptimizer", "line_number": 330, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 343, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 344, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 349, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 349, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 353, "usage_type": "call"}, {"api_name": "decagon.utility.visualization.WriterTensorboardX", "line_number": 355, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 401, "usage_type": "call"}, {"api_name": "time.time", "line_number": 424, "usage_type": "call"}, {"api_name": "time.time", "line_number": 439, "usage_type": "call"}]} +{"seq_id": "611007002", "text": "import pymysql\ndbconfig = {\n 'host' :'10.62.195.174',\n 'user' : 'm2o',\n 'passwd' : 'd4jmYqJvthhc32QI',\n 'db' :'m2o_desktop'\n }\n# 1. 连接数据库连接\nconn = pymysql.connect(**dbconfig)\n# 2. 创建游标, 给数据库发送sql指令\ncur = conn.cursor()\n# 3. 
Execute a SQL statement: query the data\ncur.execute('select * from m2o_crontab')\ntry:\n with open('crontab.txt','w') as file_it:\n results=cur.fetchall()\n print(results)\n for row_infor in results:\n file_it.write(','.join('%s'%id for id in row_infor)+'\\n')\nexcept Exception as Error:\n print(Error)\nelse:\n print(\"success\")\nfinally:\n cur.close()\n conn.close()", "sub_path": "python_test/dbquery.py", "file_name": "dbquery.py", "file_ext": "py", "file_size_in_byte": 697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pymysql.connect", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "375366899", "text": "# read faces\nimport cv2, os\nimport numpy as np\nfrom PIL import Image\nwajahDir = 'datawajah'\nreadDir = 'readwajah'\nfaceDetector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ndef getImageLabel(path):\n imagePaths = [os.path.join(path, f) for f in os.listdir(path)]\n faceSamples = []\n faceIDs = []\n for imagePath in imagePaths :\n PILImg = Image.open(imagePath).convert('L') # convert to grayscale\n imgNum = np.array(PILImg, 'uint8')\n faceId = int(os.path.split(imagePath)[-1].split(\".\")[1])\n faces = faceDetector.detectMultiScale(imgNum)\n for (x, y, w, h) in faces :\n faceSamples.append(imgNum[y:y+h, x:x+w])\n faceIDs.append(faceId)\n return faceSamples, faceIDs\n \nfaceRecognizer = cv2.face.LBPHFaceRecognizer_create()\nprint(\"Mesin sedang melakukan training data wajah\")\nfaces, IDs = getImageLabel(wajahDir)\nprint(IDs)\nfaceRecognizer.train(faces, np.array(IDs))\n# save the trained model\nfaceRecognizer.write(readDir+'/training.xml')\nprint('Sebanyak {0} data wajah telah ditraining ke mesin.'.format(len(np.unique(IDs))))", "sub_path": "read.py", "file_name": "read.py", "file_ext": "py", "file_size_in_byte": 1100, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.face.LBPHFaceRecognizer_create", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.face", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "87908204", "text": "# EMACS settings: -*-\ttab-width: 2; indent-tabs-mode: t -*-\n# vim: tabstop=2:shiftwidth=2:noexpandtab\n# kate: tab-width 2; replace-tabs off; indent-width 2;\n# \n# ==============================================================================\n# Authors:\t\t\t\t \tPatrick Lehmann\n# \n# Python Class:\t\t\tThis PoCXCOCompiler compiles xco IPCores to netlists\n# \n# Description:\n# ------------------------------------\n#\t\tTODO:\n#\t\t- \n#\t\t- \n#\n# License:\n# ==============================================================================\n# Copyright 2007-2015 Technische Universitaet Dresden - 
Germany\n#\t\t\t\t\t\t\t\t\t\t\tChair for VLSI-Design, Diagnostics and Architecture\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n#\t\thttp://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n#\n# entry point\nif __name__ != \"__main__\":\n\t# place library initialization code here\n\tpass\nelse:\n\tfrom lib.Functions import Exit\n\tExit.printThisIsNoExecutableFile(\"The PoC-Library - Python Class Compiler(PoCCompiler)\")\n\n# load dependencies\nfrom pathlib import Path\n\nfrom Base.Exceptions import *\nfrom Compiler.Base import PoCCompiler \nfrom Compiler.Exceptions import *\n\nclass Compiler(PoCCompiler):\n\n\t__executables = {}\n\n\tdef __init__(self, host, showLogs, showReport):\n\t\tsuper(self.__class__, self).__init__(host, showLogs, showReport)\n\n\t\tif (host.platform == \"Windows\"):\n\t\t\tself.__executables['CoreGen'] =\t\"coregen.exe\"\n\t\telif (host.platform == \"Linux\"):\n\t\t\tself.__executables['CoreGen'] =\t\"coregen\"\n\t\telse:\n\t\t\traise PlatformNotSupportedException(self.platform)\n\t\t\n\tdef run(self, pocEntity, device):\n\t\timport os\n\t\timport re\n\t\timport shutil\n\t\timport subprocess\n\t\timport textwrap\n\t\n\t\tself.printNonQuiet(str(pocEntity))\n\t\tself.printNonQuiet(\" preparing compiler environment...\")\n\n\t\t# TODO: improve / resolve board to device\n\t\tdeviceString = str(device).upper()\n\t\tdeviceSection = \"Device.\" + deviceString\n\t\t\n\t\t# create temporary directory for CoreGen if not existent\n\t\ttempCoreGenPath = self.host.directories[\"CoreGenTemp\"]\n\t\tif not (tempCoreGenPath).exists():\n\t\t\tself.printVerbose(\" Creating temporary directory for core generator files.\")\n\t\t\tself.printDebug(\" Temporary directory: {0}.\".format(tempCoreGenPath))\n\t\t\ttempCoreGenPath.mkdir(parents=True)\n\n\t\t# create output directory for CoreGen if not existent\n\t\tcoreGenOutputPath = self.host.directories[\"PoCNetList\"] / deviceString\n\t\tif not (coreGenOutputPath).exists():\n\t\t\tself.printVerbose(\" Creating output directory for core generator files.\")\n\t\t\tself.printDebug(\" Output directory: {0}.\".format(coreGenOutputPath))\n\t\t\tcoreGenOutputPath.mkdir(parents=True)\n\t\t\t\n\t\t# add the key Device to section SPECIAL at runtime to change interpolation results\n\t\tself.host.netListConfig['SPECIAL'] = {}\n\t\tself.host.netListConfig['SPECIAL']['Device'] = deviceString\n\t\tself.host.netListConfig['SPECIAL']['OutputDir'] = tempCoreGenPath.as_posix()\n\t\t\n\t\tif not self.host.netListConfig.has_section(str(pocEntity)):\n\t\t\tfrom configparser import NoSectionError\n\t\t\traise CompilerException(\"IP-Core '{0}' not found.\".format(str(pocEntity))) from NoSectionError(str(pocEntity))\n\t\t\n\t\t# read copy tasks\n\t\tcopyTasks = []\n\t\tcopyFileList = self.host.netListConfig[str(pocEntity)]['Copy']\n\t\tif (len(copyFileList) != 0):\n\t\t\tself.printDebug(\"CopyTasks: \\n \" + (\"\\n \".join(copyFileList.split(\"\\n\"))))\n\t\t\t\n\t\t\tcopyRegExpStr\t = r\"^\\s*(?P<SourceFilename>.*?)\"\t\t\t# Source 
filename\n\t\t\tcopyRegExpStr += r\"\\s->\\s\"\t\t\t\t\t\t\t\t\t\t\t\t\t#\tDelimiter signs\n\t\t\tcopyRegExpStr += r\"(?P<DestFilename>.*?)$\"\t\t\t\t\t#\tDestination filename\n\t\t\tcopyRegExp = re.compile(copyRegExpStr)\n\t\t\t\n\t\t\tfor item in copyFileList.split(\"\\n\"):\n\t\t\t\tcopyRegExpMatch = copyRegExp.match(item)\n\t\t\t\tif (copyRegExpMatch is not None):\n\t\t\t\t\tcopyTasks.append((\n\t\t\t\t\t\tPath(copyRegExpMatch.group('SourceFilename')),\n\t\t\t\t\t\tPath(copyRegExpMatch.group('DestFilename'))\n\t\t\t\t\t))\n\t\t\t\telse:\n\t\t\t\t\traise CompilerException(\"Error in copy rule '{0}'\".format(item))\n\t\t\n\t\t# read replacement tasks\n\t\treplaceTasks = []\n\t\treplaceFileList = self.host.netListConfig[str(pocEntity)]['Replace']\n\t\tif (len(replaceFileList) != 0):\n\t\t\tself.printDebug(\"ReplacementTasks: \\n \" + (\"\\n \".join(replaceFileList.split(\"\\n\"))))\n\n\t\t\treplaceRegExpStr =\tr\"^\\s*(?P<Filename>.*?)\\s+:\"\t\t\t# Filename\n\t\t\treplaceRegExpStr += r\"(?P<Options>[im]{0,2}):\\s+\"\t\t\t#\tRegExp options\n\t\t\treplaceRegExpStr += r\"\\\"(?P<Search>.*?)\\\"\\s+->\\s+\"\t\t#\tSearch regexp\n\t\t\treplaceRegExpStr += r\"\\\"(?P<Replace>.*?)\\\"$\"\t\t\t\t\t# Replace regexp\n\t\t\treplaceRegExp = re.compile(replaceRegExpStr)\n\n\t\t\tfor item in replaceFileList.split(\"\\n\"):\n\t\t\t\treplaceRegExpMatch = replaceRegExp.match(item)\n\t\t\t\t\n\t\t\t\tif (replaceRegExpMatch is not None):\n\t\t\t\t\treplaceTasks.append((\n\t\t\t\t\t\tPath(replaceRegExpMatch.group('Filename')),\n\t\t\t\t\t\treplaceRegExpMatch.group('Options'),\n\t\t\t\t\t\treplaceRegExpMatch.group('Search'),\n\t\t\t\t\t\treplaceRegExpMatch.group('Replace')\n\t\t\t\t\t))\n\t\t\t\telse:\n\t\t\t\t\traise CompilerException(\"Error in replace rule '{0}'.\".format(item))\n\t\t\n\t\t# setup all needed paths to execute coreGen\n\t\tcoreGenExecutablePath =\t\tself.host.directories[\"ISEBinary\"] / self.__executables['CoreGen']\n\t\t\n\t\t# read netlist settings from configuration file\n\t\tipCoreName =\t\t\t\t\tself.host.netListConfig[str(pocEntity)]['IPCoreName']\n\t\txcoInputFilePath =\t\tself.host.directories[\"PoCRoot\"] / self.host.netListConfig[str(pocEntity)]['CoreGeneratorFile']\n\t\tcgcTemplateFilePath =\tself.host.directories[\"PoCNetList\"] / \"template.cgc\"\n\t\tcgpFilePath =\t\t\t\t\ttempCoreGenPath / \"coregen.cgp\"\n\t\tcgcFilePath =\t\t\t\t\ttempCoreGenPath / \"coregen.cgc\"\n\t\txcoFilePath =\t\t\t\t\ttempCoreGenPath / xcoInputFilePath.name\n\n\t\t# report the next steps in execution\n#\t\tif (self.getVerbose()):\n#\t\t\tprint(\" Commands to be run:\")\n#\t\t\tprint(\" 1. Write CoreGen project file into temporary directory.\")\n#\t\t\tprint(\" 2. Write CoreGen content file into temporary directory.\")\n#\t\t\tprint(\" 3. Copy IPCore's *.xco file into temporary directory.\")\n#\t\t\tprint(\" 4. Change working directory to temporary directory.\")\n#\t\t\tprint(\" 5. Run Xilinx Core Generator (coregen).\")\n#\t\t\tprint(\" 6. 
Copy resulting files into output directory.\")\n#\t\t\tprint(\" ----------------------------------------\")\n\t\t\n\t\tif (self.host.platform == \"Windows\"):\n\t\t\tWorkingDirectory = \".\\\\temp\\\\\"\n\t\telse:\n\t\t\tWorkingDirectory = \"./temp/\"\n\t\t\n\t\t# write CoreGenerator project file\n\t\tcgProjectFileContent = textwrap.dedent('''\\\n\t\t\tSET addpads = false\n\t\t\tSET asysymbol = false\n\t\t\tSET busformat = BusFormatAngleBracketNotRipped\n\t\t\tSET createndf = false\n\t\t\tSET designentry = VHDL\n\t\t\tSET device = {Device}\n\t\t\tSET devicefamily = {DeviceFamily}\n\t\t\tSET flowvendor = Other\n\t\t\tSET formalverification = false\n\t\t\tSET foundationsym = false\n\t\t\tSET implementationfiletype = Ngc\n\t\t\tSET package = {Package}\n\t\t\tSET removerpms = false\n\t\t\tSET simulationfiles = Behavioral\n\t\t\tSET speedgrade = {SpeedGrade}\n\t\t\tSET verilogsim = false\n\t\t\tSET vhdlsim = true\n\t\t\tSET workingdirectory = {WorkingDirectory}\n\t\t\t'''.format(\n\t\t\t\tDevice=device.shortName(),\n\t\t\t\tDeviceFamily=device.familyName(),\n\t\t\t\tPackage=(str(device.package) + str(device.pinCount)),\n\t\t\t\tSpeedGrade=device.speedGrade,\n\t\t\t\tWorkingDirectory=WorkingDirectory\n\t\t\t))\n\n\t\tself.printDebug(\"Writing CoreGen project file to '{0}'.\".format(cgpFilePath))\n\t\twith cgpFilePath.open('w') as cgpFileHandle:\n\t\t\tcgpFileHandle.write(cgProjectFileContent)\n\n\t\t# write CoreGenerator content? file\n\t\tself.printDebug(\"Reading CoreGen content file to '{0}'.\".format(cgcTemplateFilePath))\n\t\twith cgcTemplateFilePath.open('r') as cgcFileHandle:\n\t\t\tcgContentFileContent = cgcFileHandle.read()\n\t\t\t\n\t\tcgContentFileContent = cgContentFileContent.format(\n\t\t\tname=\"lcd_ChipScopeVIO\",\n\t\t\tdevice=device.shortName(),\n\t\t\tdevicefamily=device.familyName(),\n\t\t\tpackage=(str(device.package) + str(device.pinCount)),\n\t\t\tspeedgrade=device.speedGrade\n\t\t)\n\n\t\tself.printDebug(\"Writing CoreGen content file to '{0}'.\".format(cgcFilePath))\n\t\twith cgcFilePath.open('w') as cgcFileHandle:\n\t\t\tcgcFileHandle.write(cgContentFileContent)\n\t\t\n\t\t# copy xco file into temporary directory\n\t\tself.printDebug(\"Copy CoreGen xco file to '{0}'.\".format(xcoFilePath))\n\t\tself.printVerbose(\" cp {0} {1}\".format(str(xcoInputFilePath), str(tempCoreGenPath)))\n\t\tshutil.copy(str(xcoInputFilePath), str(xcoFilePath), follow_symlinks=True)\n\t\t\n\t\t# change working directory to temporary CoreGen path\n\t\tself.printVerbose(' cd {0}'.format(str(tempCoreGenPath)))\n\t\tos.chdir(str(tempCoreGenPath))\n\t\t\n\t\t# running CoreGen\n\t\t# ==========================================================================\n\t\tself.printNonQuiet(\" running CoreGen...\")\n\t\t# assemble CoreGen command as list of parameters\n\t\tparameterList = [\n\t\t\tstr(coreGenExecutablePath),\n\t\t\t'-r',\n\t\t\t'-b', str(xcoFilePath),\n\t\t\t'-p', '.'\n\t\t]\n\t\tself.printDebug(\"call coreGen: {0}.\".format(parameterList))\n\t\tself.printVerbose(' {0} -r -b \"{1}\" -p .'.format(str(coreGenExecutablePath), str(xcoFilePath)))\n\t\tif (self.dryRun == False):\n\t\t\tcoreGenLog = subprocess.check_output(parameterList, stderr=subprocess.STDOUT, universal_newlines=True)\n\t\t\n\t\t\tif self.showLogs:\n\t\t\t\tprint(\"Core Generator log (CoreGen)\")\n\t\t\t\tprint(\"--------------------------------------------------------------------------------\")\n\t\t\t\tprint(coreGenLog)\n\t\t\t\tprint()\n\t\t\n\t\t# copy resulting files into PoC's netlist directory\n\t\tself.printNonQuiet(' copy 
result files into output directory...')\n\t\tfor task in copyTasks:\n\t\t\t(fromPath, toPath) = task\n\t\t\tif not fromPath.exists(): raise CompilerException(\"Can not copy '{0}' to destination.\".format(str(fromPath))) from FileNotFoundError(str(fromPath))\n\t\t\t\n\t\t\ttoDirectoryPath = toPath.parent\n\t\t\tif not toDirectoryPath.exists():\n\t\t\t\ttoDirectoryPath.mkdir(parents=True)\n\t\t\n\t\t\tself.printVerbose(\" copying '{0}'.\".format(fromPath))\n\t\t\tshutil.copy(str(fromPath), str(toPath))\n\t\t\n\t\t# replace in resulting files\n\t\tself.printNonQuiet(' replace in result files...')\n\t\tfor task in replaceTasks:\n\t\t\t(fromPath, options, search, replace) = task\n\t\t\tif not fromPath.exists(): raise CompilerException(\"Can not replace in file '{0}' to destination.\".format(str(fromPath))) from FileNotFoundError(str(fromPath))\n\t\t\t\n\t\t\tself.printVerbose(\" replace in file '{0}': search for '{1}' -> replace by '{2}'.\".format(str(fromPath), search, replace))\n\t\t\t\n\t\t\tregExpFlags\t = re.DOTALL\n\t\t\tif ('i' in options):\n\t\t\t\tregExpFlags |= re.IGNORECASE\n\t\t\tif ('m' in options):\n\t\t\t\tregExpFlags |= re.MULTILINE\n\t\t\t\n\t\t\tregExp = re.compile(search, regExpFlags)\n\t\t\t\n\t\t\twith fromPath.open('r') as fileHandle:\n\t\t\t\tFileContent = fileHandle.read()\n\t\t\t\n\t\t\tNewContent = re.sub(regExp, replace, FileContent)\n\t\t\t\n\t\t\twith fromPath.open('w') as fileHandle:\n\t\t\t\tfileHandle.write(NewContent)\n\t\t", "sub_path": "py/Compiler/XCOCompiler.py", "file_name": "XCOCompiler.py", "file_ext": "py", "file_size_in_byte": 11035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "lib.Functions.Exit.printThisIsNoExecutableFile", "line_number": 40, "usage_type": "call"}, {"api_name": "lib.Functions.Exit", "line_number": 40, "usage_type": "name"}, {"api_name": "Compiler.Base.PoCCompiler", "line_number": 49, "usage_type": "name"}, {"api_name": "configparser.NoSectionError", "line_number": 98, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 109, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 115, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 116, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 131, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 138, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 174, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 225, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 229, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 244, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 244, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 263, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 273, "usage_type": "attribute"}, {"api_name": "re.IGNORECASE", "line_number": 275, "usage_type": "attribute"}, {"api_name": "re.MULTILINE", "line_number": 277, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 279, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "167846406", "text": "import discord\r\nfrom discord.ext import commands\r\nimport os\r\nimport random\r\nimport asyncio\r\nfrom discord.utils import get\r\nimport datetime\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nimport socket\r\nfrom 
discord.utils import find\r\nfrom pymongo import MongoClient\r\n\r\nclient = commands.Bot( command_prefix = '=')\r\nclient.remove_command('help')\r\nguild_subscriptions = True\r\n\r\nclu= os.environ.get('MONGODB_URI')\r\ncluster = MongoClient(clu)\r\ndb = cluster[\"topianbot\"]\r\ncollection = db[\"money\"]\r\ncollectionmodules = db[\"modules\"]\r\ncollectionshop = db[\"shop\"]\r\ncollectionticket = db[\"ticket\"]\r\ncollectionlogschannels = db[\"logschannels\"]\r\ncollectionreaction = db[\"reaction\"]\r\ncollectionroles = db[\"roles\"]\r\n\t \t\t\t\t\r\n@client.event\r\nasync def on_ready():\r\n    print( 'Bot connected')\r\n\r\n \r\n@client.command()\r\nasync def servers(ctx, arg = None):\r\n    user = int(550061958938886175)\r\n    author = int(ctx.author.id)\r\n    if author == user:\r\n        for guild in client.guilds:\r\n            await ctx.send(guild)\r\n            await ctx.send(guild.id)\r\n\r\n    else:\r\n        await ctx.send(f\"Вы не создатель бота!\")\r\n\r\n@client.command()\r\nasync def invite(ctx, arg = None):\r\n    user = int(550061958938886175)\r\n    author = int(ctx.author.id)\r\n    if author == user:\r\n        for guild in client.guilds:\r\n            idi = int(guild.id)\r\n            argd = int(arg)\r\n            if idi == argd:\r\n                await ctx.send(f\"ok!\")\r\n                for channel in guild.text_channels:\r\n                    if channel.permissions_for(guild.me).send_messages:\r\n                        invite = await channel.create_invite()\r\n                        await ctx.send(invite)\r\n                        break\r\n    else:\r\n        await ctx.send(f\"Вы не создатель бота!\")\t\r\n\t\r\n@client.command()\r\nasync def send(ctx, arg = None, *, argg):\r\n    user = int(550061958938886175)\r\n    author = int(ctx.author.id)\r\n    if author == user:\r\n        for guild in client.guilds:\r\n            await ctx.send(guild)\r\n            idi = int(guild.id)\r\n            argd = int(arg)\r\n            if idi == argd:\r\n                await ctx.send(f\"Сообщение отправлено!\")\r\n                for channel in guild.text_channels:\r\n                    if channel.permissions_for(guild.me).send_messages:\r\n                        await channel.send(argg)\r\n                        break\r\n    else:\r\n        await ctx.send(f\"Вы не создатель бота!\")\r\n \r\n@client.command()\r\nasync def new(ctx, *, argg):\r\n    if not argg:\r\n        await ctx.send(f\"=new arg\")\r\n    user = int(550061958938886175)\r\n    author = int(ctx.author.id)\r\n    if author == user:\r\n        for guild in client.guilds:\r\n            for channel in guild.text_channels:\r\n                if channel.permissions_for(guild.me).send_messages:\r\n                    await channel.send(argg)\r\n                    await ctx.send(f\"Сообщение отправлено!\")\r\n                    break\r\n    else:\r\n        await ctx.send(f\"Вы не создатель бота!\") \t\r\n\t\r\n\t\r\n\t\r\n@client.event\r\nasync def on_guild_join(guild):\r\n    for channel in guild.text_channels:\r\n        if channel.permissions_for(guild.me).send_messages:\r\n            message = await channel.send(embed = discord.Embed(description = f\"\"\"Привет! 
Я Topian Bot, чтобы узнать мои команды напиши ``=help``\"\"\"))\r\n            invite = await channel.create_invite()\r\n\r\n            embed = discord.Embed(title=':white_check_mark: бота пригласили на новый сервер!', type='rich', color=0x2ecc71) #Green\r\n            embed.set_thumbnail(url=guild.icon_url)\r\n            embed.add_field(name='Name', value=guild.name, inline=True)\r\n            embed.add_field(name='ID', value=guild.id, inline=True)\r\n            embed.add_field(name='Создатель сервера', value=f'{guild.owner}', inline=True)\r\n            embed.add_field(name='Регион', value=guild.region, inline=True)\r\n            embed.add_field(name='Людей на сервере', value=guild.member_count, inline=True)\r\n            embed.add_field(name='Сервер создан', value=guild.created_at, inline=True)\r\n            embed.add_field(name= 'Приглашение на сервер', value=invite, inline=True)\r\n            channel = client.get_channel( 765246160235003936 )\r\n            await channel.send(embed=embed)\r\n            break\r\n\r\n\t\t\r\n@client.command()\r\nasync def status(ctx, * , arg):\r\n    user = int(550061958938886175)\r\n    author = int(ctx.author.id)\r\n    if author == user: \r\n        activity = discord.Activity(name= arg, type=discord.ActivityType.watching)\r\n        await client.change_presence(activity=activity)\t\r\n    else:\r\n        await ctx.send(f\"Вы не создатель бота!\") \r\n\t\r\n\r\n\r\n\t\t\r\n\t\r\n@client.command()\r\nasync def load(ctx, extensions):\r\n    client.load_extension(f'cogs.{extensions}')\r\n    await ctx.send(\"loaded\")\r\n\r\n@client.command()\r\nasync def unload(ctx, extensions):\r\n    client.unload_extension(f'cogs.{extensions}')\r\n    await ctx.send('unloaded')\r\n \r\n \r\n@client.command()\r\nasync def reload(ctx, extensions):\r\n    client.unload_extension(f'cogs.{extensions}')# unload the cog\r\n    client.load_extension(f'cogs.{extensions}')# load it again \r\n    await ctx.send('reloaded')\r\n\r\n\r\n    #join to channel\r\n@client.command()\r\nasync def join(ctx):\r\n    global voice\r\n    channel = ctx.message.author.voice.channel\r\n    voice = get(client.voice_clients, guild = ctx.guild)\r\n\r\n    if voice and voice.is_connected():\r\n        await voice.move_to(channel)\r\n    else:\r\n        voice = await channel.connect()\r\n    await ctx.send(f'Бот присоеденился к каналу: {channel}')\r\n\r\n    #leave from channel \r\n@client.command()\r\nasync def leave(ctx):\r\n    channel = ctx.message.author.voice.channel\r\n    voice = get(client.voice_clients, guild = ctx.guild)\r\n\r\n    if voice and voice.is_connected():\r\n        await voice.disconnect()\r\n    else:\r\n        voice = await channel.connect()\r\n    await ctx.send(f'Бот отключился от канала: {channel}')\r\n\r\n\t\r\n\t\r\n\r\n\r\n\r\n\t\r\n@client.event\r\nasync def on_command_error(ctx, err):\r\n\r\n    if isinstance(err, commands.BotMissingPermissions):\r\n        await ctx.send(embed=discord.Embed(description=f\"У бота отсутствуют права: {' '.join(err.missing_perms)}\\nВыдайте их ему для полного функционирования бота\"))\r\n\r\n    elif isinstance(err, commands.MissingPermissions):\r\n        await ctx.send(embed=discord.Embed(description=f\"У вас недостаточно прав для запуска этой команды!\"))\r\n\r\n    elif isinstance(err, commands.CommandOnCooldown):\r\n        await ctx.send(embed=discord.Embed(description=f\"У вас еще не прошел кулдаун на команду {ctx.command}!\\nПодождите еще {err.retry_after:.2f}\"))\r\n\t\r\n    else:\r\n        await ctx.send(embed=discord.Embed(description=f\"Произошла неизвестная ошибка: `{err}`\\nПожалуйста, свяжитесь с разработчиками для исправления этой ошибки\"))\r\n\r\n\r\nfor filename in os.listdir('./cogs'): # loop over the files in cogs\r\n    client.load_extension(f'cogs.{filename[:-3]}') \r\n    \r\n\r\ntoken= 
os.environ.get('BOT_TOKEN')\r\nclient.run( token )\r\n", "sub_path": "botfile.py", "file_name": "botfile.py", "file_ext": "py", "file_size_in_byte": 7523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 105, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 108, "usage_type": "call"}, {"api_name": "discord.Activity", "line_number": 127, "usage_type": "call"}, {"api_name": "discord.ActivityType", "line_number": 127, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 159, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 171, "usage_type": "call"}, {"api_name": "discord.ext.commands.BotMissingPermissions", "line_number": 188, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 188, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 189, "usage_type": "call"}, {"api_name": "discord.ext.commands.MissingPermissions", "line_number": 191, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 191, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 192, "usage_type": "call"}, {"api_name": "discord.ext.commands.CommandOnCooldown", "line_number": 194, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 194, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 195, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 198, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 201, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 205, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 205, "usage_type": "attribute"}]} +{"seq_id": "631583328", "text": "#!/usr/bin/env python\n\n# To use: python xml_unique_line_counter.py --file C:\\file_path_here\\GitHub\\spni.github.io\\opponents\\character_name\\behaviour.xml\n# Verbose: python xml_unique_line_counter.py --file C:\\file_path_here\\GitHub\\spni.github.io\\opponents\\character_name\\behaviour.xml --verbose\n\n# Parser:\n# pip install html5lib\n# pip install beautifulsoup4\n\n\nfrom bs4 import BeautifulSoup\nimport os\nimport sys\nimport getopt\nimport logging\nfrom collections import Counter\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\n\ndef parse(f):\n\n l_ = []\n with open(f, 'r') as hlr:\n f_ = hlr.read()\n\n logger.debug(\"Read file: ******\")\n logger.debug(f_)\n\n logger.debug(\"Parsing now: ******\")\n soup = BeautifulSoup(f_, 'html5lib')\n for c, s in enumerate(soup.find_all('state')):\n text_ = s.text.strip()\n logger.debug('Found text: {}. 
Count: {}'.format(text_.encode('utf-8'), c))\n        l_.append(text_)\n    logger.debug('**** Count *****')\n    d = dict(Counter(l_))\n    ctr = 1\n\n    for k, v in d.items():\n        logger.info('{} --> Count: {}, Line count: {}'.format(k.encode('utf-8'), v, ctr))\n        ctr += 1\n    logger.info('Unique dialogue count: {}'.format(len(d)))\n\n\nif __name__ == '__main__':\n    verbose = None\n\n    log_file = os.path.join(os.path.dirname(__file__),\"output.log\")\n    file_hndlr = logging.FileHandler(log_file)\n    logger.addHandler(file_hndlr)\n    console = logging.StreamHandler(stream=sys.stdout)\n    logger.addHandler(console)\n    ch = logging.Formatter('[%(levelname)s] %(message)s')\n    console.setFormatter(ch)\n    file_hndlr.setFormatter(ch)\n\n    argv = sys.argv[1:]\n    opts, args = getopt.getopt(argv, \"d:vf:\", [\"download=\", \"verbose\", \"file=\"])\n    for opt, arg in opts:\n        if opt in (\"-v\", \"--verbose\"):\n            verbose = True\n        elif opt in (\"-f\", \"--file\"):\n            file_ = arg\n\n    if verbose:\n        logger.setLevel(logging.getLevelName('DEBUG'))\n    else:\n        logger.setLevel(logging.getLevelName('INFO'))\n    logger.debug('CLI args: {}'.format(opts))\n    parse(file_)\n", "sub_path": "opponents/xml_unique_line_counter.py", "file_name": "xml_unique_line_counter.py", "file_ext": "py", "file_size_in_byte": 2079, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 52, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 58, "usage_type": "attribute"}, {"api_name": "getopt.getopt", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.getLevelName", "line_number": 67, "usage_type": "call"}, {"api_name": "logging.getLevelName", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "378161100", "text": "import requests\r\nimport bs4\r\nimport re,os\r\ngetboard=input('请输入需要爬取图片的版面ID:')\r\ngetPageID=input('请输入需要爬取图片的页面ID:')\r\ngetstart=int(input('请输入爬取页码起始值:'))\r\ngetend=int(input('请输入爬取页码结束值:'))\r\nindex=0\r\ndirpath = r'/home/lordshi/下载/img5'\r\nif(os.path.exists(dirpath)):\r\n    pass\r\nelse:\r\n    os.makedirs(dirpath)\r\ndata={'username':'jydlzy','password':312362,'userhidden':2}\r\nss=requests.Session()\r\nheaders = {\r\n    'Host':'www.cc98.org',\r\n    'Connection':'keep-alive',\r\n    'Upgrade-Insecure-Requests': 1,\r\n    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',\r\n    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\r\n    'Referer': 'http://www.cc98.org/list.asp?boardid=147',\r\n    'Accept-Encoding': 'gzip, deflate, sdch',\r\n    'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,en-US;q=0.4'\r\n    
}\npos=ss.post('http://www.cc98.org/login.asp?action=chk',data=data,headers=headers)\ncookie=dict(pos.cookies)\n#print((cookie))\n###################图片下载代码##########################\ndef downloadpic(url1229):\n get_thing = ss.get(url1229, headers=headers, cookies=cookie)\n html_get = get_thing.content\n u_get = str(html_get, 'utf-8')\n img_urls_get = re.findall(r'(http://file.cc98.org/uploadfile/.*?jpg)', u_get)\n for url in img_urls_get:\n print(\"Downloading:\", url)\n try:\n res = requests.get(url)\n if str(res.status_code)[0] == \"4\":\n print(\"未下载成功:\", url)\n continue\n except Exception as e:\n print(\"未下载成功:\", url)\n filename = os.path.join(dirpath, str(index) + \".jpg\")\n\t#with open xxxxxxxxxxxxx值得学习#########\n with open(filename, 'wb') as f:\n f.write(res.content)\n global index\n index+=1\n##############URL获取########################################################################\nfor i in range(getstart,getend+1):\n url='http://www.cc98.org/dispbbs.asp?'+'BoardID='+getboard+r'&id='+getPageID+r'&star='+str(i)\n res=ss.get(url,headers=headers,cookies=cookie)\n ################################################\n #看不懂后面语句在干吗\n #################################################\n #print(url)\n #html=bs4.BeautifulSoup(res.content,\"html.parser\")\n\n # def has_class_but_no_id(tag):\n # return tag.has_attr('id') and tag.has_attr('title') and tag.has_attr('href')\n # content=html.find_all(has_class_but_no_id)\n #result1=re.findall(r'[upload=jpg](http://file.cc98.org/uploadfile/(.*?).jpg)[/upload]',str(html))\n #result1=content[5:]\n # result2=[]\n # for location1 in result1:\n # a = re.findall('href=\"(.*?)\"',str(location1))[0]\n # result2.append(a)\n\n # for u in result2:\n # urlselect=(r'http://www.cc98.org/'+u).replace('&','&')\n # print((urlselect))\n downloadpic(url)\nprint(\"下载结束,一共 %s 张图片\" % index)\n", "sub_path": "downloadpic.py", "file_name": "downloadpic.py", "file_ext": "py", "file_size_in_byte": 3062, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 15, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "138650101", "text": "import pandas as pd\n\nfrom zipline.data.bundles import register\nfrom zipline.data.bundles.csvdir import csvdir_equities\n\nstart_session = pd.Timestamp('2017-01-01 00:00:00', tz='utc')\nend_session = pd.Timestamp('2017-12-31 23:59:00', tz='utc')\n\nregister(\n 'crypto-bundle',\n csvdir_equities(\n ['minute'],\n '/path/to/your/csvs',\n ),\n calendar_name='NYSE', # US equities\n start_session=start_session,\n end_session=end_session\n)", "sub_path": "section 0006/extension.py", "file_name": "extension.py", "file_ext": "py", "file_size_in_byte": 454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.Timestamp", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 7, "usage_type": "call"}, {"api_name": "zipline.data.bundles.register", "line_number": 9, 
"usage_type": "call"}, {"api_name": "zipline.data.bundles.csvdir.csvdir_equities", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "283827358", "text": "import uuid\nimport tornado.web\nimport sclhub.redis_request_handler\nimport sclhub.tools.user\n\nclass IndexHandler(tornado.web.RequestHandler):\n redis = None\n loader = None\n mongo = None\n\n def initialize(self, loader, redis, mongo):\n self.redis = redis\n self.loader = loader\n self.mongo = mongo\n def get(self):\n username = self.redis.get(\"session.\" + str(self.get_cookie(\"session_data\")))\n #print(username)\n try:\n logout = self.get_argument(\"logout\")\n except tornado.web.MissingArgumentError:\n logout = 0\n if username is not None and not logout:\n self.redirect(\"/\"+username.decode(\"utf-8\"));\n else:\n self.redis.delete(\"session.{}\".format(self.get_cookie(\"session_data\")))\n self.clear_cookie(\"session_data\")\n token = str(uuid.uuid4()).replace(\"-\", \"\")\n self.redis.hset(\"tokens\", self.request.remote_ip, token)\n html = self.loader.load(\"auth.html\").generate(token=token)\n self.write(html)\n\nclass ProfileHandler(sclhub.redis_request_handler.RedisRequestHandler):\n redis = None\n loader = None\n mongo = None\n\n def initialize(self, loader, redis, mongo):\n self.redis = redis\n self.loader = loader\n self.mongo = mongo\n\n @tornado.web.authenticated\n def get(self, username):\n c_username = self.redis.get(\"session.\" + self.get_cookie(\"session_data\"))\n c_user = sclhub.tools.user.User(c_username, self.redis, self.mongo)\n print(c_user)\n html = self.loader.load(\"profile.html\").generate(\n profile=user.profile,\n current_user=c_user\n )\n self.write(html)\n\n def post(self, username):\n # print(self.request.files)\n self.write(\"{}\")", "sub_path": "sclhub/handlers/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 1813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tornado.web.web", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 6, "usage_type": "name"}, {"api_name": "tornado.web.web", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 20, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 27, "usage_type": "call"}, {"api_name": "sclhub.redis_request_handler.redis_request_handler", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sclhub.redis_request_handler", "line_number": 32, "usage_type": "name"}, {"api_name": "sclhub.redis_request_handler.tools.user.User", "line_number": 45, "usage_type": "call"}, {"api_name": "sclhub.redis_request_handler.tools", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sclhub.redis_request_handler", "line_number": 45, "usage_type": "name"}, {"api_name": "tornado.web.web", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "40000078", "text": "\"\"\"\nthis is an package\n\"\"\"\nimport os\nimport configparser\nimport sys\nfrom .const import *\nimport time\nimport utils\n\ncf = configparser.ConfigParser()\ncf.read(SRC_PATH + '/setting/config.ini')\n\n\nclass Task(object):\n\n def __init__(self):\n \"\"\"初始化各个下载任务的目录\"\"\"\n # self.provider = '' # 每个项目的 Provider\n # self.proxy = '' #每个项目所需要的代理IP(针对特定采集源)\n try:\n if self.provider:\n self.project_name = self.provider\n except AttributeError as e:\n self.project_name = sys.argv[0][:-3]\n else:\n self.project_name = 
sys.argv[0][:-3]\n        self.cf = cf\n        update_path = \"update\"\n        project_filepath = os.path.join(FILE_DOWNLOAD_PATH, self.project_name)\n        self.index_path = os.path.join(project_filepath, 'index', update_path)\n        self.list_path = os.path.join(project_filepath, 'list', update_path)\n        self.detail_path = os.path.join(project_filepath, 'detail', update_path)\n        self.cover_path = os.path.join(project_filepath, 'cover', update_path)\n        self.html_path = os.path.join(project_filepath, 'html', update_path)\n        self.data_path = os.path.join(FILE_DOWNLOAD_PATH, 'products') # finished-products directory\n\n        self.template_file = os.path.join(\n            self.data_path,\n            self.project_name + '_' + update_path + '.db3',)\n\n    def notify(self, dst, func, msg):\n        \"\"\"\n        Notify: send messages to the destination (dst).\n        \"\"\"\n        pass\n\n    def init_db(self, type_):\n        return utils.init_db(type_, self.provider)\n\n\nclass Download(Task):\n    \"\"\"\n    Downloader class.\n    \"\"\"\n\n    def down_list(self):\n        \"\"\"Download the list pages.\"\"\"\n        if not os.path.exists(self.list_path):\n            os.makedirs(self.list_path)\n\n    def down_index(self):\n        \"\"\"Download the index pages.\"\"\"\n        if not os.path.exists(self.index_path):\n            os.makedirs(self.index_path)\n\n    def down_detail(self):\n        \"\"\"Download the detail pages.\"\"\"\n        if not os.path.exists(self.detail_path):\n            os.makedirs(self.detail_path)\n\n    def down_cover(self):\n        \"\"\"Download the covers.\"\"\"\n        if not os.path.exists(self.cover_path):\n            os.makedirs(self.cover_path)\n\n    def down_html(self):\n        \"\"\"Download the start pages.\"\"\"\n        if not os.path.exists(self.html_path):\n            os.makedirs(self.html_path)\n\n\nclass Parse(Task):\n    \"\"\"\n    Parser class.\n    \"\"\"\n\n    def parse_html(self):\n        \"\"\"Parse the start pages.\"\"\"\n        if not os.path.exists(self.html_path):\n            raise FileNotFoundError('You must download the html_page (start page) first.')\n\n    def parse_list(self):\n        \"\"\"Parse the list pages.\"\"\"\n        if not os.path.exists(self.list_path):\n            raise FileNotFoundError('You must download the list_page (list page) first.')\n\n    def parse_index(self):\n        \"\"\"Parse the index pages.\"\"\"\n        if not os.path.exists(self.index_path):\n            raise FileNotFoundError('You must download the index_page (index page) first.')\n\n    def parse_detail(self):\n        \"\"\"Parse the detail pages.\"\"\"\n        if not os.path.exists(self.detail_path):\n            raise FileNotFoundError('You must download the detail_page (detail page) first.')\n        if not os.path.exists(self.template_file):\n            import shutil\n            shutil.copy(self.cf.get(\"path\", \"template_path\"), self.template_file)\n\n    def clear(self):\n        \"\"\"Rename the update folder to the current date (ymd).\n        \"\"\"\n        new_dirname = time.strftime(\"%Y%m%d\")\n        dir_list = [\n            self.index_path,\n            self.list_path,\n            self.html_path,\n            self.detail_path,\n            self.cover_path,]\n        for dir_ in dir_list:\n            if os.path.exists(dir_):\n                os.rename(dir_, os.path.dirname(dir_) + '/' + new_dirname)\n", "sub_path": "src/utils/task.py", "file_name": "task.py", "file_ext": "py", "file_size_in_byte": 3849, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "configparser.ConfigParser", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 
33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "utils.init_db", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 109, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}]} +{"seq_id": "643584528", "text": "#coding:utf-8\nimport re\nimport time\nimport json\nimport logging\nimport platform\nimport unittest\nimport requests\nfrom BeautifulReport import BeautifulReport\nimport configparser\n\n# 指定日志输出级别\nlogging.basicConfig(level=logging.INFO)\n\n# 根据系统环境走不同的目录路径来写测试报告\nif (platform.system() == 'Windows'):\n print(platform.system())\n report_dir = \"D:\\\\uitest\\\\report\"\n print(report_dir)\n # 读配置文件信息\n configInfo = \"C:\\\\shh\\\\uitest\\\\panwenjie\\\\config.ini\"\n config 
= configparser.ConfigParser()\n config.read(configInfo, encoding='UTF-8')\n code = config.get(\"Info\", \"vcode\")\n orgId = config.get(\"Info\", \"TADoneorgid\")\n username = config.get(\"Info\", \"TADusername\")\n\nelif (platform.system() == 'Linux'):\n report_dir = '/opt/uitest/report'\n print(platform.system())\nelse:\n print(platform.system())\n\n# 开始测试\nclass algoTestdemo(unittest.TestCase):\n #获取token\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testdzcp(self):\n '''测试1.0定制测评'''\n # 请求头\n self.headers = {'content-type': 'application/json;charset=UTF-8'};\n # 教师端登录\n\n loginOrgPost = {'username': username, 'code': code, 'systemId': 11, 'orgId': orgId}\n loginOrgResponse = requests.post(\"https://autotest.learnta.cn/__api/auth/user/loginOrg\",\n data=json.dumps(loginOrgPost), headers=self.headers)\n print(loginOrgResponse.status_code)\n self.token = loginOrgResponse.json()['data']['access_token']\n print(self.token)#登录码\n\n #请求头\n headers = {'content-type': 'application/json;charset=UTF-8', 'authorization': 'Bearer ' + self.token}\n # 发送卡片\n qwes = {'topicInfoId': \"3496\", 'kpointIds': [6000050826, 6000050827, 6000050828], 'orgId': orgId,'examType':\"newExam\",'topicName':\"Unit 1 情景交际\"}\n data4 = requests.post('https://autotest.learnta.cn/__api/beatV3/topic/diygo/create', data=json.dumps(qwes),\n headers=headers)\n print(data4.status_code)\n\n cardid = data4.json()['shareUrl']\n # 正则匹配后端返回的goUrl中的csteid\n pattern = re.findall('m/(.+?)$', cardid) # 匹配规则cste=开头到&之间的字符串\\\n result = pattern[0]\n print(result)\n\n # print(self.kpid)#卡片id\n # print(data4.status_code)#请求是否成功\n # 做题时登录\n\n loginOrgPostdata = {'openId': \"\", 'randomCode': result, 'shareType': \"chargeExam\", 'username': '算法测试','mobile': username,'code': code}\n loginOrgResponsedata = requests.post(\"https://learnta.cn/__api/wechat/public/wechat/qRcode/login\",\n data=json.dumps(loginOrgPostdata), headers=self.headers)\n\n print(loginOrgResponsedata.status_code)\n\n #\n #获取cste\n huoqkeid = requests.get('https://learnta.cn/__api/beatV3/topic/goExam/'+str(result)\n ,headers=headers)\n cste = huoqkeid.json()['cste']\n\n #获取question\n qqbw = {'userAnswers': [], 'isEvaluated': 'true', 'isRecommIfNoWeakPoints': 'false','taskExecutionId': cste}\n questionasda = requests.post('https://learnta.cn/__api/beatV3/student/task/12/' + str(cste) + '/question',\n data=json.dumps(qqbw), headers=headers)\n question = questionasda.json()['data']['questions']\n questionId = questionasda.json()['data']['questions'][0]['id']\n\n # 做题\n while question is not None:\n #做题\n data2 = {'startTime':time.time(),'finishTime': time.time(), 'isSubmit': 0,'questionId': questionId, 'answer': [],'sourceOf': '','parentQuestionId': questionId}\n data = requests.put('https://learnta.cn/__api/beatV3/student/task/12/'+str(cste)+'/answer',data = json.dumps(data2),headers = headers)\n logging.info(data.json())\n print(data.status_code)\n #获取questionid\n\n qqbw = {'userAnswers': [], 'isEvaluated': 'true', 'isRecommIfNoWeakPoints': 'false',\n 'taskExecutionId': cste}\n questionasda = requests.post('https://learnta.cn/__api/beatV3/student/task/12/' + str(cste) + '/question',\n data=json.dumps(qqbw), headers=headers)\n question = questionasda.json()['data']['questions']\n if question is not None:\n questionId = questionasda.json()['data']['questions'][0]['id']\n\n\n ads = {'shareType': \"chargeExam\"}\n huoqkeid = requests.put(\n 'https://learnta.cn/__api/beatV3/classroom/task/exam/addStudent/'+str(cste)\n ,data = json.dumps(ads), headers=headers)\n 
print(huoqkeid.status_code)\n\n adnis = requests.get(\n 'https://learnta.cn/__api/beatV3/public/student/task/12/'+str(cste)+'/report'\n ,headers=headers)\n print(adnis.status_code)\n\n testResult = adnis.json()['message']\n # 断言测试结果\n self.assertEqual(testResult, 'success', msg=\"练习卡做题失败\")\n\n\ntestsuite = unittest.TestSuite()\ntestsuite.addTests(unittest.makeSuite(algoTestdemo))\n\n#使用的report方法输出HTML格式的报告\nrunner = BeautifulReport((testsuite))\n#runner.report(filename='KuPiWEB端测试.html', description='酷培学生端测试用例',log_path=report_dir)\n# filename = time.strftime(\"%Y-%m-%d-%H-%M-%S\")+r\".html\"\n\n\n\n# 根据系统环境判断需要读取的目录路径\nif(platform.system()=='Windows'):\n report_dir = 'C:\\\\python1\\\\day06'\n filename = \"C:\\\\python1\\\\day05\"\n print(platform.system())\n runner.report(filename='算法接口测试.html', description='算法接口测试', log_path=report_dir)\n print(report_dir)\n print(filename)\nelif(platform.system()=='Linux'):\n report_dir = \"/opt/uitest/report\"\n filename = '/opt/uitest/img'\n runner.report(filename='算法接口测试.html', description='算法接口测试', report_dir=report_dir)\n print(report_dir)\n print(filename)\n print(platform.system())\nelse:\n print(platform.system())", "sub_path": "panwenjie/Is suit/TADone/Adzcpk.py", "file_name": "Adzcpk.py", "file_ext": "py", "file_size_in_byte": 6211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.basicConfig", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 16, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 17, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 22, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 28, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 30, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 32, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 35, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 50, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 60, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 60, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 66, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 75, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 76, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 88, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "time.time", "line_number": 96, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 97, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 97, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 104, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 105, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 112, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 114, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 117, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 127, "usage_type": "call"}, {"api_name": "unittest.makeSuite", 
"line_number": 128, "usage_type": "call"}, {"api_name": "BeautifulReport.BeautifulReport", "line_number": 131, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 138, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 141, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 145, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 151, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "415003949", "text": "from django.contrib import admin\nfrom django.db.models import F\nfrom .models import Account, HTTPRequest, ModelInfo\n\nadmin.site.register(Account)\nadmin.site.register(ModelInfo)\n\n\nclass HttpRequestAdmin(admin.ModelAdmin):\n actions = ['incr_priority', 'decr_priority']\n ordering = ['-priority']\n\n def incr_priority(self, request, queryset):\n queryset.update(priority=F('priority') + 1)\n self.message_user(request, 'Priority increased')\n\n def decr_priority(self, request, queryset):\n queryset.update(priority=F('priority') - 1)\n self.message_user(request, 'Priority decreased')\n\n incr_priority.short_description = \"Increase priority by 1\"\n decr_priority.short_description = 'Decrease priority by 1'\n\n\nadmin.site.register(HTTPRequest, HttpRequestAdmin)\n", "sub_path": "django_hello_world/hello/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.contrib.admin.site.register", "line_number": 5, "usage_type": "call"}, {"api_name": "models.Account", "line_number": 5, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 6, "usage_type": "call"}, {"api_name": "models.ModelInfo", "line_number": 6, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.F", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.admin.site.register", "line_number": 25, "usage_type": "call"}, {"api_name": "models.HTTPRequest", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "205074087", "text": "# -*- coding: utf-8 -*-\n__all__ = (\"Package\", \"PackageDatabase\")\n\nimport typing\nfrom abc import ABC, abstractmethod\nfrom typing import (\n Any,\n Collection,\n Dict,\n Generic,\n Iterator,\n List,\n Mapping,\n Optional,\n)\n\nimport attr\nfrom loguru import logger\n\nfrom .action import ActionFormat\nfrom .msg import MsgFormat\nfrom .srv import SrvFormat\n\nif typing.TYPE_CHECKING:\n from .. 
import AppInstance\n\nMF = typing.TypeVar(\"MF\", bound=MsgFormat)\nSF = typing.TypeVar(\"SF\", bound=SrvFormat)\nAF = typing.TypeVar(\"AF\", bound=ActionFormat)\n\n\nclass Package(Generic[MF, SF, AF], ABC):\n name: str\n path: str\n messages: Collection[MF]\n services: Collection[SF]\n actions: Collection[AF]\n\n @classmethod\n @abstractmethod\n def from_dict(cls, dict: Dict[str, Any]) -> \"Package\":\n ...\n\n def to_dict(self) -> Dict[str, Any]:\n d = {\n \"name\": self.name,\n \"path\": self.path,\n \"messages\": [m.to_dict() for m in self.messages],\n \"services\": [s.to_dict() for s in self.services],\n \"actions\": [a.to_dict() for a in self.actions],\n }\n return d\n\n\nPT = typing.TypeVar(\"PT\", bound=Package)\n\n\n@attr.s(frozen=True)\nclass PackageDatabase(Generic[PT], ABC, Mapping[str, PT]):\n \"\"\"\n An immutable database of packages, represented as :class:`Package`\n instances, indexed by their names, given as :class:`str`.\n\n Note\n ----\n Implements most :class:`dict` operations via\n :class:`abc.collections.Mapping`,\n including :code:`db['name']`, :code:`len(db)`), :code:`db.keys()`,\n :code:`db.values()`, and :code:`iter(db)`.\n As instances of this class are immutable, no destructive\n :class:`dict` operations are provided (e.g., :code:`del db['foo'])`\n and `db['foo'] = bar`).\n \"\"\"\n\n _contents: Mapping[str, PT] = attr.ib()\n\n @classmethod\n def from_packages(cls,\n packages: typing.Iterable[PT]\n ) -> \"PackageDatabase[PT]\":\n return cls({p.name: p for p in packages})\n\n @classmethod\n def from_paths(cls,\n app_instance: \"AppInstance\",\n paths: List[str],\n ignore_bad_paths: bool = True,\n ) -> \"PackageDatabase[PT]\":\n \"\"\"\n Constructs a package database from a list of the paths of the packages\n belonging to the database.\n\n Parameters\n ----------\n app_instance: AppInstance\n an instance of an application from which to get\n the package database\n paths: List[str]\n a list of the absolute paths of the packages.\n ignore_bad_paths: bool\n If :code:`True`, non-existent paths will be ignored.\n If :code:`False`, a :exc:`FileNotFoundError` will be raised.\n\n Raises\n ------\n FileNotFoundError\n if no package is found at a given path.\n \"\"\"\n packages: List[PT] = []\n for p in paths:\n try:\n package = cls._build_package(app_instance, p)\n except FileNotFoundError:\n logger.exception(f\"unable to build package: {p}\")\n if not ignore_bad_paths:\n raise\n else:\n packages.append(package)\n return cls.from_packages(packages)\n\n @classmethod\n @abstractmethod\n def _build_package(cls, app_instance: \"AppInstance\", path: str) -> PT:\n ...\n\n @classmethod\n def build(cls,\n app_instance: \"AppInstance\",\n paths: Optional[List[str]] = None\n ) -> \"PackageDatabase[PT]\":\n if paths is None:\n paths = cls._determine_paths(app_instance)\n db_package = cls.from_paths(app_instance, paths)\n return db_package\n\n @classmethod\n @abstractmethod\n def _determine_paths(cls, app_instance: \"AppInstance\") -> List[str]:\n ...\n\n @classmethod\n @abstractmethod\n def from_dict(cls, d: List[Dict[str, Any]]) -> \"PackageDatabase[PT]\":\n ...\n\n def to_dict(self) -> List[Dict[str, Any]]:\n return [p.to_dict() for p in self.values()]\n\n def __len__(self) -> int:\n \"\"\"Returns the number of packages within this database.\"\"\"\n return len(self._contents)\n\n def __getitem__(self, name: str) -> PT:\n \"\"\"Fetches the description for a given package.\n\n Raises\n ------\n KeyError\n if no package exists with the given name.\n \"\"\"\n return 
self._contents[name]\n\n def __iter__(self) -> Iterator[str]:\n \"\"\"\n Returns an iterator over the names of the packages contained within\n this database.\n \"\"\"\n yield from self._contents\n", "sub_path": "src/roswire/common/package.py", "file_name": "package.py", "file_ext": "py", "file_size_in_byte": 4765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 24, "usage_type": "attribute"}, {"api_name": "typing.TypeVar", "line_number": 27, "usage_type": "call"}, {"api_name": "msg.MsgFormat", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 28, "usage_type": "call"}, {"api_name": "srv.SrvFormat", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 29, "usage_type": "call"}, {"api_name": "action.ActionFormat", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 32, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Collection", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Collection", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Collection", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 55, "usage_type": "call"}, {"api_name": "typing.Generic", "line_number": 59, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 75, "usage_type": "name"}, {"api_name": "attr.ib", "line_number": 75, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 79, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "loguru.logger.exception", "line_number": 114, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 114, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 129, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 143, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 163, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "230507576", "text": "import numpy as np\nimport 
pandas as pd\nimport os\nimport matplotlib.pyplot as pyplot\nimport math\nimport scipy.stats as sp\n\nos.chdir('Data')\nmyarr = np.load('project data.npy')\n#df is the data in momentum eta phi form\ndf = pd.DataFrame(data=myarr, columns=[\"label\",\"weight\",\"lep0pt\",\"lep0eta\",\"lep0phi\",\n\t\t\t\t\t\t\t\t\t\t \"lep1pt\",\"lep1eta\",\"lep1phi\",\n\t\t\t\t\t\t\t\t\t\t \"jet0pt\",\"jet0eta\",\"jet0phi\",\n\t\t\t\t\t\t\t\t\t\t \"jet1pt\",\"jet1eta\",\"jet1phi\",\n\t\t\t\t\t\t\t\t\t\t \"jet2pt\",\"jet2eta\",\"jet2phi\",\n\t\t\t\t\t\t\t\t\t\t \"MET\",\"METSig\",\"NBJet\",\"NEvent\"])\ncolumns = df.columns.values\n#adding the x y z momentum data\nfor i in [0,1,2,3,4]:\n\tp = df.iloc[:,2+3*i]*np.cosh(df.iloc[:,3+3*i])\n\tpx = df.iloc[:,2+3*i]*np.cos(df.iloc[:,4+3*i])\n\tpy = df.iloc[:,2+3*i]*np.sin(df.iloc[:,4+3*i])\n\tpz = df.iloc[:,2+3*i]*np.sinh(df.iloc[:,3+3*i])\n\tparticlename = columns[2+3*i][:4]\n\tdf[('%sp' % (particlename))] = p\n\tdf[('%spx' % (particlename))] = px\n\tdf['%spy' % (particlename)] = py\n\tdf['%spz' % (particlename)] = pz \n    #Linear momentum data has been added to the dataframe\n    \nsuffix4vc = ['p','px','py','pz']\n\t   \n#Arrays to allow me to reference properties of particles more systematically\n\t   \nfor x in suffix4vc:\n    df[('lepcombined%s') % (x)] = df[('lep0%s') % (x)] + df[('lep1%s') % (x)]\n    df[('jetcombined%s') % (x)] = df[('jet0%s') % (x)] + df[('jet1%s') % (x)]\n    df[('allcombined%s' % (x))] = df[('lep0%s') % (x)] + df[('lep1%s') % (x)] + df[('jet0%s') % (x)] + df[('jet1%s') % (x)]\n    \n#Assume E is equal to p\ndef invMass(df,system,p,px,py,pz):\n    return (df['%s%s' % (system,p)]*df['%s%s' % (system,p)] - df['%s%s' % (system,px)]*df['%s%s' % (system,px)] - df['%s%s' % (system,py)]*df['%s%s' % (system,py)] - df['%s%s' % (system,pz)]*df['%s%s' % (system,pz)]).apply(math.sqrt)\n\ndf['lepcombinedinvariantmass'] = invMass(df,'lepcombined','p','px','py','pz')\ndf['jetcombinedinvariantmass'] = invMass(df,'jetcombined','p','px','py','pz')\ndf['allcombinedinvariantmass'] = invMass(df,'allcombined','p','px','py','pz')\n\ninvariantmasses = [df.label, df.jetcombinedinvariantmass, df.lepcombinedinvariantmass, df.allcombinedinvariantmass]\ninvariantmassesdf = pd.DataFrame(data=invariantmasses, columns=['label','jet','lep','combined'])\n\nsignal = df[df.label >= 300000]\nbackground = df[df.label <= 300000]\n\ndef histweighteddrac(data,weight):\n    #ad hoc modification of the Freedman-Diaconis rule for the weighted data to decide bin sizes\n    constant = 1\n    binsize = constant*2*sp.iqr(data)/pow(len(data),1/4)\n    binn = np.arange(min(data),max(data)+binsize,binsize)\n    pyplot.hist(data,bins=binn, weights = weight, stacked = True, )\ndef histweighteddracnostack(data,weight):\n    #ad hoc modification of the Freedman-Diaconis rule for the weighted data to decide bin sizes\n    constant = 1\n    binsize = constant*2*sp.iqr(data)/pow(len(data),1/4)\n    binn = np.arange(min(data),max(data)+binsize,binsize)\n    pyplot.hist(data,bins=binn, weights = weight, stacked = False, )\n\npyplot.figure(0)\nhistweighteddrac(signal.allcombinedinvariantmass, signal.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass All / Total Signal')\npyplot.savefig('testallsignal.png')\npyplot.figure(1)\nhistweighteddrac(signal.lepcombinedinvariantmass,signal.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Lep / Total 
Signal')\npyplot.savefig('testLepsignal.png')\npyplot.figure(2)\nhistweighteddrac(signal.jetcombinedinvariantmass,signal.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Jet / Total Signal')\npyplot.savefig('testJetsignal.png')\n\npyplot.figure(0)\nhistweighteddrac(background.allcombinedinvariantmass,background.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass All / SigandBack')\npyplot.legend(('signal','background'))\npyplot.savefig('testallsigandback.png')\npyplot.figure(1)\nhistweighteddrac(background.lepcombinedinvariantmass,background.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Lep / SigandBack')\npyplot.legend(('signal','background'))\npyplot.savefig('testLepsigandback.png')\npyplot.figure(2)\nhistweighteddrac(background.jetcombinedinvariantmass,background.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Jet / SigandBack')\npyplot.legend(('signal','background'))\npyplot.savefig('testJetsigandback.png')\n\npyplot.figure(3)\nhistweighteddrac(background.allcombinedinvariantmass,background.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass All / Background only')\npyplot.savefig('testallback.png')\npyplot.figure(4)\nhistweighteddrac(background.lepcombinedinvariantmass,background.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Lep / Background only')\npyplot.savefig('testJetback.png')\npyplot.figure(5)\nhistweighteddrac(background.jetcombinedinvariantmass,background.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Jet / Background only')\npyplot.savefig('testLepback.png')\n\npyplot.figure(6)\nhistweighteddrac(df.allcombinedinvariantmass,df.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass All / All events')\npyplot.savefig('testallfull.png')\npyplot.figure(7)\nhistweighteddrac(df.lepcombinedinvariantmass,df.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Lep / All events')\npyplot.savefig('testLepfull.png')\npyplot.figure(8)\nhistweighteddrac(df.jetcombinedinvariantmass,df.weight)\npyplot.ylabel('Frequency')\npyplot.xlabel('Combined Mass Jet / All events')\npyplot.savefig('testJetfull.png')\nhistogramdata = pd.DataFrame(columns = (\"Label\",\"N\"))\n\n\nfor x,y in zip(signal.label.unique(),range(len(signal.label.unique()))):\n    signalplot = df[df.label == x]\n    \n    pyplot.figure(9+4*y)\n    histweighteddrac(signalplot.allcombinedinvariantmass,signalplot.weight)\n    pyplot.ylabel('Frequency')\n    pyplot.xlabel('Combined Mass All / %s' % x)\n    pyplot.savefig('%stestallfull.png' % x)\n    \n    \n    pyplot.figure(10+4*y)\n    histweighteddrac(signalplot.lepcombinedinvariantmass,signalplot.weight)\n    pyplot.ylabel('Frequency')\n    pyplot.xlabel('Combined Mass lep / %s' % x)\n    pyplot.savefig('%stestlepfull.png' % x)\n    \n    \n    \n    pyplot.figure(11+4*y)\n    histweighteddrac(signalplot.jetcombinedinvariantmass,signalplot.weight)\n    pyplot.ylabel('Frequency')\n    pyplot.xlabel('Combined Mass jet / %s' % x)\n    pyplot.savefig('%stestjetfull.png' % x)\n    \n    \n    \n    pyplot.figure(12+4*y)\n    histweighteddracnostack(signalplot.jetcombinedinvariantmass,signalplot.weight)\n    histweighteddracnostack(signalplot.lepcombinedinvariantmass,signalplot.weight)\n    histweighteddracnostack(signalplot.allcombinedinvariantmass,signalplot.weight)\n    \n    pyplot.ylabel('Frequency')\n    pyplot.xlabel('Combined Mass / %s' % x)\n    pyplot.legend(('jet','lep','combined'))\n    pyplot.savefig('%scombined.png' % x)\n    histogramdata.loc[y] = 
([x,np.sum(signalplot.weight)])\n \n#From these plots the peaks on the jets and the combined peaks are quite clear and there's only one so some method of automatically obtaining peaks from the base data will be employed", "sub_path": "Histogram scripts/histogram.py", "file_name": "histogram.py", "file_ext": "py", "file_size_in_byte": 7072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.chdir", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.cosh", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.sinh", "line_number": 23, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.stats.stats.iqr", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 57, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "scipy.stats.stats.iqr", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 63, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 142, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "36362721", "text": "\"\"\"\nTest the customer model.\n\"\"\"\n\nimport unittest\nimport peewee as pw\nimport os\nimport sys\n\n_dir = os.path.dirname(os.path.realpath(__file__))\n\nsys.path.append(os.path.dirname(_dir))\nfrom customer_model import Customer\n\nTEST_DB = pw.SqliteDatabase(':memory:')\n\n\nclass TestCustomer(unittest.TestCase):\n \"\"\"Test the Customer model.\"\"\"\n\n db = None\n\n def setUp(self) -> None:\n\n TEST_DB.bind([Customer])\n TEST_DB.connect()\n TEST_DB.create_tables([Customer])\n\n self.definitions = {'Bob': {'id': 1, 'name': 'Bob', 'last_name': 'Xavi',\n 'address': \"505 N Thayer\", 'phone': '713-874-2356',\n 'email': 'bobxavi@comcast.net', 'status': True,\n 'credit_limit': 3400.12},\n 'Alice': {'id': 2, 'name': 'Alice', 'last_name': 'Wond',\n 'address': \"507 N Thayer\", 'phone': '713-874-0001',\n 'email': 'alice@gmail.com', 'status': False,\n 'credit_limit': 12000},\n 'Bob2': {'id': 3, 'name': 'Bob', 'last_name': 'Xavi',\n 'address': \"509 S Main\", 'phone': '281-874-2356',\n 'email': 'bobxavi2@comcast.net', 'status': True,\n 'credit_limit': 1}\n }\n\n # Add all 3 people to database\n for 
person, defin in self.definitions.items():\n created = Customer.create(**defin)\n created.save()\n\n def tearDown(self) -> None:\n # Delete everything from database\n TEST_DB.drop_tables([Customer])\n TEST_DB.close()\n\n def test_create_customer(self):\n \"\"\"Tests creating a customer.\"\"\"\n\n new_cust = {'id': 5, 'name': 'Bob', 'last_name': 'Xavi',\n 'address': \"509 S Main\", 'phone': '281-874-2356',\n 'email': 'bobxavi2@comcast.net', 'status': True,\n 'credit_limit': 1}\n\n created = Customer.create(**new_cust)\n\n # Assert that customer was created correctly\n for attr, val in new_cust.items():\n self.assertEqual(val, getattr(created, attr))\n\n def test_create_dup(self):\n \"\"\"Tests creating a customer with same ID.\"\"\"\n\n with self.assertRaises(Exception):\n created = Customer.create(**self.definitions['Bob'])\n\n\n\n\n", "sub_path": "students/zach_meves/lesson03/tests/test_model.py", "file_name": "test_model.py", "file_ext": "py", "file_size_in_byte": 2491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "peewee.SqliteDatabase", "line_number": 15, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 18, "usage_type": "attribute"}, {"api_name": "customer_model.Customer", "line_number": 25, "usage_type": "name"}, {"api_name": "customer_model.Customer", "line_number": 27, "usage_type": "name"}, {"api_name": "customer_model.Customer.create", "line_number": 45, "usage_type": "call"}, {"api_name": "customer_model.Customer", "line_number": 45, "usage_type": "name"}, {"api_name": "customer_model.Customer", "line_number": 50, "usage_type": "name"}, {"api_name": "customer_model.Customer.create", "line_number": 61, "usage_type": "call"}, {"api_name": "customer_model.Customer", "line_number": 61, "usage_type": "name"}, {"api_name": "customer_model.Customer.create", "line_number": 71, "usage_type": "call"}, {"api_name": "customer_model.Customer", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "536295587", "text": "### 5/30 with split equally version\n'''\nBuild simple and modular functions to be utilized for machine learning,\nincluding analyzing data, training classification models, and evaluating models.\n'''\n\nimport datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score, r2_score, precision_score, recall_score,\\\n mean_absolute_error, mean_squared_error\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV\n\n#1) Read data\ndef read_data(filename):\n '''\n Read csv data into pandas dataframe.\n Inputs: filename\n Output: pandas dataframe\n '''\n return pd.read_csv(filename)\n\n\n#2) Explore data\ndef 
find_min_max(dataframe, col):\n '''\n Find minimum and maximum value of a column.\n Inputs: dataframe, column name (str)\n Output: minimum and maximum in tuple\n '''\n col_min = dataframe[col].min()\n col_max = dataframe[col].max()\n return col_min, col_max\n\ndef find_mean_std(dataframe):\n '''\n Find mean and standard deviation for each column in a dataframe.\n Inputs: dataframe\n Output: mean and standard deviation\n '''\n return dataframe.describe().loc[['mean', 'std']]\n\ndef plot_distribution(dataframe, figure_size, x_val, y_label, x_label, title):\n '''\n Plot the distribution of a column as a CDF-style line plot.\n Inputs: dataframe (with index reset), figure size (tuple), x-axis values,\n y-axis label, x-axis label, plot title\n Output: displayed graph of the cdf\n '''\n sns.set(rc={'figure.figsize':figure_size})\n sns.lineplot(y=dataframe.index, x=x_val, data=dataframe)\n plt.ylabel(y_label)\n plt.xlabel(x_label)\n plt.title(title)\n return plt.show()\n\n\n#3) Create training and testing sets\ndef create_train_test(dataframe, test_split_size, rand_state):\n '''\n Split into train and test data.\n Inputs: dataframe, split size, random state\n Output: train data, test data\n '''\n train_df, test_df = train_test_split(dataframe, test_size=test_split_size,\n random_state=rand_state)\n return train_df, test_df\n\n\n#4) Pre-process data\ndef convert_to_numeric(dataframe, col_list):\n '''\n Convert columns in a dataframe into numeric features.\n Inputs: dataframe, list of columns to be converted to numeric\n Output: dataframe\n '''\n for col in col_list:\n dataframe[col] = dataframe[col].astype(int)\n return dataframe\n\ndef impute_missing_values_train(train_df, col_list):\n '''\n Impute missing values with median for training data.\n Inputs: training dataframe, list of columns to be imputed\n Output: None (imputes the training dataframe in place)\n '''\n for col in col_list:\n train_df[col].fillna(train_df[col].median(), inplace=True)\n\ndef impute_missing_values_test(train_df, test_df, col_list):\n '''\n Impute missing values with the training-data median for testing data.\n Inputs: training dataframe, testing dataframe,\n list of columns to be imputed\n Output: None (imputes the testing dataframe in place)\n '''\n for col in col_list:\n test_df[col].fillna(train_df[col].median(), inplace=True)\n\ndef normalize_train(train_df, col_list):\n '''\n Normalize training data with means and standard deviations.\n Inputs: training dataframe, list of columns to be normalized\n Output: training dataframe\n '''\n for col in col_list:\n df_col = pd.DataFrame(train_df[col])\n train_df[col] = (df_col - df_col.mean()) / df_col.std()\n return train_df\n\ndef normalize_test(train_df, test_df, col_list):\n '''\n Normalize testing data with means and standard deviations\n from training data.\n Inputs: training dataframe, testing dataframe,\n list of columns to be normalized\n Output: testing dataframe\n '''\n for col in col_list:\n train_col = pd.DataFrame(train_df[col])\n test_col = pd.DataFrame(test_df[col])\n test_df[col] = (test_col - train_col.mean()) / train_col.std()\n return test_df\n\n\n#5) Generate features\ndef one_hot_encoding(dataframe, col, prefix_name):\n '''\n Create columns for categorical features to avoid ordering of features.\n Inputs: dataframe, column to convert, prefix name for new columns\n Output: dataframe with one-hot-coded columns\n '''\n coded_df = pd.get_dummies(dataframe[col], prefix=prefix_name)\n return coded_df\n\ndef discretize_cont_var(dataframe, col, num_categories, labels_lst):\n '''\n Discretize continuous variables.\n Inputs: dataframe, column, number of discrete categories, labels list\n Output: None\n '''\n 
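# For illustration: pd.cut with an integer bin count assigns each value to one of\n # num_categories equal-width bins, e.g. pd.cut([1, 5, 9], 2, labels=['low', 'high'])\n # yields ['low', 'low', 'high'].\n 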
dataframe[col] = pd.cut(dataframe[col], num_categories, labels=labels_lst)\n\ndef categorize_col_by_n_groups(df, col, n, lower_bound, upper_bound):\n '''\n Categorizes the values of the given column into n groups of equal length,\n bounded by the given lower/upper bounds.\n\n Inputs:\n df (pd.DataFrame): Pandas dataframe being explored\n col (str): name of the column to be sectioned\n n (int): number of groups\n lower_bound (float): lower bound defining the range of the values in\n the given column\n upper_bound (float): upper bound defining the range of the values in\n the given column\n Output:\n (pd.Series): categorized values for the column.\n Labeling scheme of each group is 'g1', 'g2', ..., 'gn'.\n '''\n bins = np.linspace(lower_bound, upper_bound, num=n+1)\n labels = ['g' + str(x+1) for x in range(n)]\n return pd.cut(df[col], bins, labels=labels, include_lowest=True)\n\ndef categorize_col_equally_by_n_groups(df, col, n):\n '''\n Categorizes the values of the given column into n quantile-based groups of\n (approximately) equal size.\n\n Inputs:\n df (pd.DataFrame): Pandas dataframe being explored\n col (str): name of the column to be sectioned\n n (int): number of groups\n Output:\n (pd.Series): categorized values for the column.\n Labeling scheme of each group is 'g1', 'g2', ..., 'gn'.\n '''\n labels = ['g' + str(x+1) for x in range(n)]\n return pd.qcut(df[col], n, labels=labels)\n\n#6) Build pipeline for training and testing machine learning models\ndef build_apply_model(X_train, Y_train, X_test, Y_test, MODELS, GRID, TYPE, cv):\n '''\n Build, tune with grid search, and evaluate machine learning models.\n Inputs: train data, test data, dicts of models and parameter grids,\n model type ('classifier' or 'regression'), number of cv folds\n Output: dataframe of results including model type, parameters, and\n evaluation scores, along with the fitted best models\n '''\n\n start = datetime.datetime.now()\n results = []\n if TYPE == 'classifier':\n scoring = 'accuracy'\n elif TYPE == 'regression':\n scoring = 'r2'\n else:\n raise ValueError('Choose type: \"regression\" or \"classifier\"')\n\n for model_key in MODELS.keys():\n print(\"Training model:\", model_key)\n gs = GridSearchCV(estimator=MODELS[model_key],\n param_grid=GRID[model_key],\n scoring=scoring,\n cv=cv)\n gs = gs.fit(X_train, Y_train)\n best_score = gs.best_score_\n best_params = gs.best_params_\n print('best score', \"|\", best_score, 'best params', \"|\", best_params)\n best_model = gs.best_estimator_\n best_model.fit(X_train, Y_train)\n Y_pred = best_model.predict(X_test)\n\n if TYPE == 'regression':\n r2 = r2_score(Y_test, Y_pred)\n MAE = mean_absolute_error(Y_test, Y_pred)\n MSE = mean_squared_error(Y_test, Y_pred)\n results.append([model_key, best_params, r2, MAE, MSE, best_model])\n else:\n accuracy = evaluate_classifiers(Y_test, Y_pred)\n precision = get_precision(Y_test, Y_pred)\n recall = get_recall(Y_test, Y_pred)\n results.append([model_key, best_params, accuracy, precision, recall, best_model])\n\n result = pd.DataFrame(results)\n if TYPE == 'regression':\n result = result.rename(columns={0: 'Model', 1: 'Parameters', 2: 'R2_score',\n 3: 'MAE', 4: 'MSE', 5: 'best_model'})\n elif TYPE == 'classifier':\n result = result.rename(columns={0: 'Model', 1: 'Parameters', 2: 'Accuracy Score',\n 3: 'Precision Score', 4: 'Recall Score', 5: 'best_model'})\n\n stop = 
datetime.datetime.now()\n print(\"Time Elapsed:\", stop - start)\n return result\n\n\n#7) Evaluate classifiers\ndef evaluate_classifiers(y_true, y_pred):\n '''\n Evaluate classifiers with accuracy score.\n Inputs: true y-value, predicted y-value\n Output: accuracy score\n '''\n return accuracy_score(y_true, y_pred)\n\ndef get_precision(y_true, y_pred):\n '''\n Evaluate classifiers with precision score.\n Inputs: true y-value, predicted y-value\n Output: precision score\n '''\n return precision_score(y_true, y_pred, average='macro')\n\ndef get_recall(y_true, y_pred):\n '''\n Evaluate classifiers with recall score.\n Inputs: true y-value, predicted y-value\n Output: recall score\n '''\n return recall_score(y_true, y_pred, average='macro')\n\n\n#8) Summarize model results\ndef summarize_best_model_result(df):\n '''\n Summarize the model with best results.\n Inputs: dataframe\n Output: information about the best model\n '''\n for colname in df.columns:\n print(colname, ': ', df[colname].head(1).values[0])\n\ndef obtain_match_rate(table, col_list, true_col):\n '''\n Obtain match rate of a column to another.\n Inputs: dataframe, list of columns for comparisons, true column used as the\n basis for comparisons\n Outputs: match rate between columns\n '''\n for col in col_list:\n val = table[table[col] == table[true_col]].count()[col] / len(table)\n print(col, ': ', round(val, 2))\n", "sub_path": "pipeline_.py", "file_name": "pipeline_.py", "file_ext": "py", "file_size_in_byte": 10620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 58, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 169, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.qcut", "line_number": 191, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 202, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 202, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 211, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 224, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 225, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 226, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 247, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 247, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 259, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 267, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 275, "usage_type": "call"}]} +{"seq_id": "204425111", "text": "import numpy as np\nimport os\nimport pandas as pd\nfrom pathlib import Path\n\n\ndef sort_tested():\n \"\"\"\n Sort tested data into the normal or tumorous folders based on the\n csv provided for this challenge.\n \"\"\"\n csv_path = \"data/reference.csv\"\n\n data_path = Path(\"data/test/\")\n tumour_save_path = \"data/test/tumour/\"\n normal_save_path = \"data/test/normal/\"\n\n df = pd.read_csv(csv_path, header=None)\n\n name = df.iloc[:, 0]\n # print(name)\n label = df.iloc[:, 1]\n\n for file_path in data_path.rglob(\"*.tif\"):\n # filename = \"/\".join(str(file_path).split(\"/\")[-2:])\n filename = str(file_path).split(\"/\")[-1]\n filename_noext = filename.split(\".\")[0]\n\n row = df.loc[df.iloc[:, 0] == filename_noext]\n\n print(\"FNAME: {}\".format(filename))\n\n label = row.iloc[:, 1].values\n\n # The lookup returns an array; unwrap it to get the scalar label\n if not isinstance(label, str):\n label = label[0]\n\n if label == \"Normal\":\n savepath = normal_save_path + filename\n else:\n savepath = tumour_save_path + filename\n\n os.rename(file_path, savepath)\n\n\ndef rename_stupid():\n \"\"\"Strip a leading 'normal' prefix from matching test file names.\"\"\"\n fpath = Path(\"data/test/\")\n\n for file_path in fpath.rglob(\"*.tif\"):\n spl = str(file_path).split(\"normal\")\n\n if len(spl) > 1:\n fname = spl[1]\n\n old_path = \"/\".join(str(file_path).split(\"/\")[:-1]) + \"/\"\n\n os.rename(file_path, old_path + fname)\n\n\ndef main():\n sort_tested()\n # rename_stupid()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "sort_tested.py", "file_name": "sort_tested.py", "file_ext": "py", "file_size_in_byte": 1500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 43, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "604296549", "text": "import pkg_resources\nfrom pymongo import UpdateOne\nimport os\nfrom dotenv import load_dotenv\nimport simplejson as json\nfrom bson import json_util\nimport threading\n\nfrom smug.mongo_manager import MongoManager\nfrom smug.connection_manager import ConnectionManager\n\n\nclass MongoSave():\n def __init__(self, write_buffer_size, buffer_enabled=True):\n self.buffer = {}\n self.write_buffer_size = write_buffer_size\n self.ch = None\n self.mongo_manager = MongoManager()\n self.lock = threading.RLock()\n self.buffer_enabled = buffer_enabled\n\n def save(self):\n self.lock.acquire()\n if len(self.buffer) > 0:\n messages = self.buffer.values()\n latest_message = json.dumps(list(messages)[-1], default=json_util.default)\n connection_manager.publish_to_queue('latest', latest_message)\n requests = 
[UpdateOne({'metadata.url': value['metadata']['url']},\n {'$setOnInsert': {\n 'metadata': value['metadata'],\n 'author': value['author'],\n 'message': value['message']\n },\n '$addToSet': {'reports': {\"$each\": value['reports']}}},\n upsert=True)\n for value in messages]\n for delivery_tag in self.buffer:\n # Ack to the MQ\n self.ch.basic_ack(delivery_tag=delivery_tag)\n\n self.mongo_manager.message_collection.bulk_write(requests)\n self.buffer.clear()\n self.lock.release()\n\n def callback(self, ch, method, properties, body):\n self.lock.acquire()\n self.ch = ch\n self.buffer[method.delivery_tag] = (json.loads(body, object_hook=json_util.object_hook))\n self.lock.release()\n\n # Writes to the database if the buffer is the correct length\n if len(self.buffer) >= self.write_buffer_size or not self.buffer_enabled:\n self.save()\n\n\nif __name__ == '__main__':\n env_location = pkg_resources.resource_filename('resources', '.env')\n if os.environ.get('DOTENV_LOADED', '0') != '1':\n load_dotenv(env_location)\n mongouri = os.environ.get(\"MONGODB_URI\", \"mongodb://localhost:27017/smug\")\n mongodb = os.environ.get(\"MONGODB_DATABASE\", \"smug\")\n write_buffer_size = int(os.environ.get(\"MONGO_WRITE_BUFFER\", 100))\n prefetch_count = int(os.environ.get(\"PREFETCH_COUNT\", 500))\n\n if write_buffer_size > prefetch_count:\n raise ValueError('MongoDB write buffer should not exceed prefetch count. This would cause the buffer to never reach its flush size, so messages would never be written or acknowledged.')\n\n mongo_save = MongoSave(write_buffer_size=write_buffer_size, buffer_enabled=True)\n connection_manager = ConnectionManager()\n connection_manager.subscribe_to_queue('save', mongo_save.callback)\n", "sub_path": "smug/savers/mongo_save.py", "file_name": "mongo_save.py", "file_ext": "py", "file_size_in_byte": 2897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "smug.mongo_manager.MongoManager", "line_number": 18, "usage_type": "call"}, {"api_name": "threading.RLock", "line_number": 19, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "bson.json_util.default", "line_number": 26, "usage_type": "attribute"}, {"api_name": "bson.json_util", "line_number": 26, "usage_type": "name"}, {"api_name": "pymongo.UpdateOne", "line_number": 28, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "bson.json_util.object_hook", "line_number": 48, "usage_type": "attribute"}, {"api_name": "bson.json_util", "line_number": 48, "usage_type": "name"}, {"api_name": "pkg_resources.resource_filename", "line_number": 57, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 58, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 58, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 59, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 60, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 61, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 62, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 63, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 63, "usage_type": "attribute"}, {"api_name": "smug.connection_manager.ConnectionManager", "line_number": 69, 
"usage_type": "call"}]} +{"seq_id": "612894290", "text": "# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom rack.openstack.common.db.sqlalchemy import models\n\nfrom sqlalchemy import Boolean, Column, ForeignKey, Integer, String\nfrom sqlalchemy import Text, schema\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nBase = declarative_base()\n\n\nclass Group(models.SoftDeleteMixin,\n models.TimestampMixin,\n models.ModelBase,\n Base):\n\n __tablename__ = 'groups'\n securitygroups = relationship(\"Securitygroup\")\n processes = relationship(\"Process\")\n\n gid = Column(String(36), primary_key=True)\n user_id = Column(String(255))\n project_id = Column(String(255))\n display_name = Column(String(255))\n display_description = Column(String(255))\n status = Column(String(255))\n\n\nclass Service(models.SoftDeleteMixin,\n models.TimestampMixin,\n models.ModelBase,\n Base):\n\n \"\"\"Represents a running service on a host.\"\"\"\n\n __tablename__ = 'services'\n __table_args__ = (\n schema.UniqueConstraint(\"host\", \"topic\", \"deleted\",\n name=\"uniq_services0host0topic0deleted\"),\n schema.UniqueConstraint(\"host\", \"binary\", \"deleted\",\n name=\"uniq_services0host0binary0deleted\")\n )\n\n id = Column(Integer, primary_key=True)\n host = Column(String(255))\n binary = Column(String(255))\n topic = Column(String(255))\n report_count = Column(Integer, nullable=False, default=0)\n disabled = Column(Boolean, default=False)\n disabled_reason = Column(String(255))\n\n\nclass Network(models.SoftDeleteMixin,\n models.TimestampMixin,\n models.ModelBase,\n Base):\n\n __tablename__ = 'networks'\n\n network_id = Column(String(255), primary_key=True)\n gid = Column(String(255))\n neutron_network_id = Column(String(255))\n is_admin = Column(Boolean, default=False)\n cidr = Column(String(255))\n ext_router = Column(String(255))\n user_id = Column(String(255))\n project_id = Column(String(255))\n display_name = Column(String(255))\n\n\nclass Keypair(models.SoftDeleteMixin,\n models.TimestampMixin,\n models.ModelBase,\n Base):\n\n __tablename__ = 'keypairs'\n\n keypair_id = Column(String(36), primary_key=True)\n gid = Column(String(36), ForeignKey('groups.gid'), nullable=False)\n user_id = Column(String(255))\n project_id = Column(String(255))\n nova_keypair_id = Column(String(255))\n private_key = Column(Text)\n display_name = Column(String(255))\n is_default = Column(Boolean, default=False)\n\n\nclass Securitygroup(models.SoftDeleteMixin,\n models.TimestampMixin,\n models.ModelBase,\n Base):\n\n __tablename__ = 'securitygroups'\n\n deleted = Column(Integer, nullable=False, default=0)\n securitygroup_id = Column(String(36), primary_key=True)\n gid = Column(String(36), ForeignKey('groups.gid'))\n neutron_securitygroup_id = Column(String(36))\n is_default = Column(Boolean, default=False)\n user_id = Column(String(255))\n project_id = Column(String(255))\n display_name = Column(String(255))\n\n 
group = relationship(\"Group\",\n foreign_keys=gid,\n primaryjoin='and_('\n 'Securitygroup.gid == Group.gid,'\n 'Securitygroup.deleted == 0,'\n 'Group.deleted == 0)')\n\n\nclass Process(models.SoftDeleteMixin,\n models.TimestampMixin,\n models.ModelBase,\n Base):\n\n __tablename__ = 'processes'\n\n deleted = Column(Integer, nullable=False, default=0)\n gid = Column(String(36), ForeignKey('groups.gid'), nullable=False)\n keypair_id = Column(String(36), ForeignKey('keypairs.keypair_id'))\n pid = Column(String(36), primary_key=True)\n ppid = Column(String(36), ForeignKey('processes.pid'))\n nova_instance_id = Column(String(36))\n glance_image_id = Column(String(36))\n nova_flavor_id = Column(Integer)\n user_id = Column(String(255))\n project_id = Column(String(255))\n display_name = Column(String(255))\n is_proxy = Column(Boolean(), default=False)\n shm_endpoint = Column(Text)\n ipc_endpoint = Column(Text)\n fs_endpoint = Column(Text)\n args = Column(Text)\n userdata = Column(Text)\n app_status = Column(Text)\n\n group = relationship(\"Group\",\n foreign_keys=gid,\n primaryjoin='and_('\n 'Process.gid == Group.gid,'\n 'Process.deleted == 0,'\n 'Group.deleted == 0)')\n\n securitygroups = relationship(\"Securitygroup\",\n secondary=\"processes_securitygroups\",\n primaryjoin='and_('\n 'Process.pid == ProcessSecuritygroup.pid,'\n 'Process.deleted == 0)',\n secondaryjoin='and_('\n 'Securitygroup.securitygroup_id == '\n 'ProcessSecuritygroup.securitygroup_id,'\n 'Securitygroup.deleted == 0)',\n backref=\"processes\")\n\n networks = relationship(\"Network\",\n secondary=\"processes_networks\",\n primaryjoin='and_('\n 'Process.pid == ProcessNetwork.pid,'\n 'Process.deleted == 0)',\n secondaryjoin='and_('\n 'Network.network_id == ProcessNetwork.network_id,'\n 'Network.deleted == 0)',\n backref=\"processes\")\n\n\nclass ProcessSecuritygroup(models.ModelBase, Base):\n\n __tablename__ = 'processes_securitygroups'\n\n pid = Column(String(36), ForeignKey(\n 'processes.pid'), nullable=False, primary_key=True)\n securitygroup_id = Column(String(36), ForeignKey(\n 'securitygroups.securitygroup_id'), nullable=False, primary_key=True)\n\n\nclass ProcessNetwork(models.ModelBase, Base):\n\n __tablename__ = 'processes_networks'\n\n pid = Column(String(36), ForeignKey(\n 'processes.pid'), nullable=False, primary_key=True)\n network_id = Column(String(36), ForeignKey(\n 'networks.network_id'), nullable=False, primary_key=True)\n", "sub_path": "rack/db/sqlalchemy/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 7061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 21, "usage_type": "call"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.SoftDeleteMixin", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 24, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.TimestampMixin", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 25, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 26, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 30, "usage_type": "call"}, {"api_name": 
"sqlalchemy.orm.relationship", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 38, "usage_type": "call"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.SoftDeleteMixin", "line_number": 41, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 41, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.TimestampMixin", "line_number": 42, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 42, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 43, "usage_type": "name"}, {"api_name": "sqlalchemy.schema.UniqueConstraint", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.schema", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlalchemy.schema.UniqueConstraint", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.schema", "line_number": 52, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 56, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 60, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 61, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 62, "usage_type": "call"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.SoftDeleteMixin", "line_number": 65, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 65, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.TimestampMixin", "line_number": 66, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 66, "usage_type": "name"}, {"api_name": 
"rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 67, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 67, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 72, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 72, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 74, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 74, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 75, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 76, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 76, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 78, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 78, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 80, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 80, "usage_type": "call"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.SoftDeleteMixin", "line_number": 83, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 83, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.TimestampMixin", "line_number": 84, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 84, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 85, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 85, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 90, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 90, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 91, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 91, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 91, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 92, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 92, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 93, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 93, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 94, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 94, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 95, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 95, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 96, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 96, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 97, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 97, "usage_type": "argument"}, {"api_name": 
"rack.openstack.common.db.sqlalchemy.models.SoftDeleteMixin", "line_number": 100, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 100, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.TimestampMixin", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 101, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 102, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 102, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 107, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 107, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 108, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 108, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 109, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 109, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 109, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 110, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 110, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 111, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 111, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 112, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 112, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 113, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 113, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 114, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 114, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 116, "usage_type": "call"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.SoftDeleteMixin", "line_number": 124, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 124, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.TimestampMixin", "line_number": 125, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 125, "usage_type": "name"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 126, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 126, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 131, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 131, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 132, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 132, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 132, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 133, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 133, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 133, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 134, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 134, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", 
"line_number": 135, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 135, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 135, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 136, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 136, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 137, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 137, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 138, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 138, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 139, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 139, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 140, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 140, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 141, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 141, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 142, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 142, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 143, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 143, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 144, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 144, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 145, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 145, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 146, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 146, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 147, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 147, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 148, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 148, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 150, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 157, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 168, "usage_type": "call"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 179, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 179, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 183, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 183, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 183, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 185, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 185, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 185, "usage_type": "call"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models.ModelBase", "line_number": 189, "usage_type": "attribute"}, {"api_name": "rack.openstack.common.db.sqlalchemy.models", "line_number": 189, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 193, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 193, "usage_type": "call"}, 
{"api_name": "sqlalchemy.ForeignKey", "line_number": 193, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 195, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 195, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "126990948", "text": "#!/bin/env python\n# -*- coding: utf-8 -*-\n\nimport difflib\nimport sys\nimport argparse\nimport webbrowser\nimport os\nimport filecmp\nimport subprocess\nimport platform\n\nfrom enum import Enum\n\nfrom comtool import *\nfrom gittool import *\n\nclass Color(Enum):\n white = 0\n green = 1\n red = 2\n yellow = 3\n \n\nbCI = False\nif len(sys.argv) > 2 and sys.argv[2] == \"ci\":\n bCI = True\n\nif bCI == False:\n print(\n \"************************************************************\\n\"\n \"如果下面报告类似 ImportError: No module named 'xxx' 的错误,\\n\"\n \"说明 python 运行环境中缺少必要的包。\\n\"\n \"此时可以启动\\\"安装python包\\\"任务来修复 python 运行环境(需要连接互联网)。\\n\"\n \"************************************************************\\n\"\n )\n\nfrom colorama import Fore, init, AnsiToWin32\ninit(wrap=False)\nstream = AnsiToWin32(sys.stderr).stream\n\nred = lambda text: '\\033[0;31;1m' + text + '\\033[0m'\ngreen = lambda text: '\\033[0;32;1m' + text + '\\033[0m'\nyellow = lambda text: '\\033[0;33;1m' + text + '\\033[0m'\n\n \n# 比较两个文件并把结果生成一份html文本\ndef compare_file(file1, file2, seqNum, caseCount, bCI, resultFileType, dsType):\n returnVal = 0\n if file1 == \"\" or file2 == \"\":\n print('文件路径不能为空:第一个文件的路径:{0}, 第二个文件的路径:{1} .'.format(file1, file2))\n sys.stdout.flush()\n sys.exit()\n else:\n print(\"正在比较标准答案结果文件 {0} 和用户编写的应用程序结果文件 {1}\".format(file1, file2), end=': ')\n sys.stdout.flush()\n\n if os.path.isfile(file1) and os.path.isfile(file2):\n comResult = 0\n if resultFileType == ResultFileType.writedat:\n comResult = advanced_dat_file_compare(file1, file2)\n else:\n comResult = advanced_file_compare(file1, file2)\n\n if comResult: \n print(\"文件相同\")\n score = 40\n if seqNum == caseCount:\n score = 100\n else:\n score = score + 60 / caseCount * seqNum\n\n if bCI :\n promptInfo = \"Case{0} 验证成功\".format(seqNum)\n outputPromptInfo(bCI, promptInfo, Color.green)\n print(\"exec-score\", int(score))\n print()\n sys.stdout.flush() \n else:\n promptInfo = \"Case{0} 验证成功, 分数: {1}\".format(seqNum, int(score))\n outputPromptInfo(bCI, promptInfo, Color.green)\n\n if seqNum == caseCount:\n promptInfo = \"恭喜你通过了所有测试!\"\n outputPromptInfo(bCI, promptInfo, Color.green)\n\n returnVal = 1\n return returnVal\n else:\n print(\"文件不同\")\n promptInfo = \"Case{0} 验证失败\".format(seqNum)\n outputPromptInfo(bCI, promptInfo, Color.green)\n\n if bCI == False and resultFileType != ResultFileType.writedat:\n text1_lines = read_file(file1)\n text2_lines = read_file(file2)\n diff = difflib.HtmlDiff() # 创建HtmlDiff 对象\n result = diff.make_file(text1_lines, text2_lines) # 通过make_file 方法输出 html 格式的对比结果\n # 将结果写入到result_comparation.html文件中\n try:\n with open('result_comparation.html', 'a+', encoding=\"utf-8\") as result_file:\n dsFile = getDataSourceFileStr(dsType).format(seqNum)\n resultfile = getResultFileStr(resultFileType).format(seqNum)\n userresultfile = getUserResultFileStr(resultFileType).format(seqNum)\n platformurl = getPlatformURL() + \"/engintime/codecode/publicmanual/blob/master/comp-file-desc.md\"\n promptContent = \"
第一次查看此页面吗?<a href='{0}'>查看页面说明</a><br>\".format(platformurl)\n promptContent += \"Case {0} 验证失败。使用的数据源文件是 {1}。<br>标准答案结果文件 {2}(左边)与用户编写的应用程序结果文件 {3}(右边)的比较结果:<br>
\".format(seqNum, dsFile, resultfile, userresultfile)\n result = promptContent + result\n result_file.write(result)\n except IOError as error:\n print('写入html文件错误:{0}'.format(error))\n finally:\n return returnVal\n else:\n return returnVal\n\n\n# 1表示绿色,2表示红色,3表示黄色\ndef outputPromptInfo(bCI, promptInfo, color):\n\n if bCI :\n if color == Color.green:\n print(green(promptInfo))\n elif color == Color.red:\n print(red(promptInfo))\n elif color == Color.yellow:\n print(yellow(promptInfo))\n else:\n print(promptInfo)\n sys.stdout.flush()\n else:\n if color == Color.green:\n print(Fore.GREEN + promptInfo, file = stream)\n elif color == Color.red:\n print(Fore.RED + promptInfo, file = stream)\n elif color == Color.yellow:\n print(Fore.YELLOW + promptInfo, file = stream) \n else:\n print(promptInfo) \n print(Fore.WHITE, file = stream)\n sys.stdout.flush()\n\nif __name__ == \"__main__\":\n \n compResultFile = \"result_comparation.html\"\n if os.path.isfile(compResultFile):\n os.remove(compResultFile)\n\n promptInfo = \"正在使用 makefile 文件生成项目\"\n outputPromptInfo(bCI, promptInfo, Color.green)\n execResult = os.system(\"make\")\n\n if execResult != 0:\n errorInfo = \"生成项目失败\"\n outputPromptInfo(bCI, errorInfo, Color.red)\n exit(1) \n else:\n score = 40\n if not os.path.isfile(\"output1.txt\") and not os.path.isfile(\"writefile1.txt\") and not os.path.isfile(\"writefile1.dat\"):\n score = 100\n if bCI:\n promptInfo = \"生成项目成功\"\n outputPromptInfo(bCI, promptInfo, Color.green)\n promptInfo = \"exec-score {0}\".format(score)\n print(promptInfo)\n print()\n sys.stdout.flush() \n else:\n promptInfo = \"生成项目成功, 分数 {0}\".format(score)\n outputPromptInfo(bCI, promptInfo, Color.green)\n\n dsType, dsFileStr, resultFileType, userresultFileStr, resultFileStr = getProjInfo() \n\n if resultFileType == ResultFileType.nooutput:\n exit(0) \n\n # 获取case的数量\n caseCount = 1\n while 1:\n resultFile = resultFileStr.format(caseCount)\n if not os.path.isfile(resultFile):\n caseCount -= 1\n break\n caseCount += 1\n\n if caseCount > 0:\n if bCI :\n print(yellow(\"标准答案结果文件和用户输出结果文件的比较原则是:比较时忽略行尾空白字符和文件末尾的空白行。\"))\n else:\n print(Fore.YELLOW + \"标准答案结果文件和用户输出结果文件的比较原则是:比较时忽略行尾空白字符和文件末尾的空白行。\", file = stream, end='')\n print(Fore.WHITE, file = stream)\n\n seqNum = 1\n while 1:\n dsFile = dsFileStr.format(seqNum) \n resultFile = resultFileStr.format(seqNum)\n userresultFile = userresultFileStr.format(seqNum)\n if seqNum == 1 and not os.path.isfile(resultFile):\n promptInfo = \"该项目未提供自动化验证功能\"\n if bCI :\n print(red(promptInfo))\n else:\n print(Fore.RED + \"该项目未提供自动化验证功能\", file = stream, end='')\n print(Fore.WHITE, file = stream)\n break\n if not os.path.isfile(resultFile):\n break\n \n if dsType == DataSourceFileType.noinput:\n print(\"正在验证 case{0}\".format(seqNum))\n else:\n print(\"正在使用数据源文件 {0} 验证 case{1}\".format(dsFile, seqNum))\n\n runCmdStr =getRunProgCmdStr(dsType, resultFileType, dsFile, userresultFile)\n \n promptInfo = \"正在执行命令: {0}\".format(runCmdStr)\n outputPromptInfo(bCI, promptInfo, Color.white)\n\n if bCI == False:\n promptInfo = \"提示:如果验证程序长时间未结束,说明应用程序中可能存在死循环。请停止验证程序(Ctrl+c),修改应用程序后再验证。\"\n outputPromptInfo(bCI, promptInfo, Color.yellow)\n\n execResult = os.system(runCmdStr)\n if execResult != 0:\n errorInfo = \"应用程序执行异常,返回值:{0}。\".format(execResult)\n outputPromptInfo(bCI, errorInfo, Color.red)\n exit(1)\n \n if os.path.isfile(resultFile) and os.path.isfile(userresultFile):\n if compare_file(resultFile, userresultFile, seqNum, caseCount, bCI, resultFileType, dsType) == 0:\n if bCI :\n if dsType != 
DataSourceFileType.noinput:\n print(\"使用的数据源文件是 {0}。\".format(dsFile))\n \n print(\"标准答案结果文件 {0}(左边)与用户编写的应用程序结果文件 {1}(右边)的比较结果:\".format(resultFile, userresultFile))\n sys.stdout.flush() \n runCommand = \"diff {0} {1} -b -B -y -i -W 100\".format(resultFile, userresultFile) \n execResult = os.system(runCommand)\n if execResult != 0:\n print()\n exit(1)\n else:\n if resultFileType != ResultFileType.writedat:\n if os.path.exists('/.dockerenv'):\n promptInfo = \"查看文件比较结果可帮助你查找验证失败的原因。方法是:\\n选择 View 菜单中的 Explorer 打开文件列表,右键点击 result_comparation.html 文件,在弹出的菜单中选择 Open Preview\" \n else:\n promptInfo = \"已经使用浏览器打开了结果比较文件result_comparation.html\"\n webbrowser.open(\"result_comparation.html\") \n else:\n promptInfo = \"选择 View 菜单中的 Explorer 打开文件列表,右键点击 writefile*.dat文件或user_writefile*.dat文件,在弹出的菜单中选择 Open With...,再选择 Hex Editor \"\n outputPromptInfo(bCI, promptInfo, Color.red)\n exit(1)\n seqNum = seqNum + 1\n", "sub_path": "学习/编译原理/实验作业/Experiment7/.vscode/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 10735, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "enum.Enum", "line_number": 18, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "colorama.init", "line_number": 39, "usage_type": "call"}, {"api_name": "colorama.AnsiToWin32", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 78, "usage_type": "attribute"}, {"api_name": "difflib.HtmlDiff", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 130, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 130, "usage_type": "attribute"}, {"api_name": "colorama.Fore.GREEN", "line_number": 133, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 133, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 135, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 135, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 137, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 137, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 140, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 140, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 141, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 147, "usage_type": "call"}, {"api_name": "os.system", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 159, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 167, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 190, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 190, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 191, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 191, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "colorama.Fore.RED", "line_number": 203, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 203, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 204, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 204, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 236, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path", "line_number": 244, "usage_type": "attribute"}, {"api_name": "webbrowser.open", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "20419062", "text": "\"\"\"wedding URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom guests.views import HelloView, CoupleLoginView, GuestLoginView, GuestRegisterView, CouplePageView, CoupleLogoutView, GuestLogoutView, SpousePageView, SpouseAddInfoView, SpouseInfoView, SpouseEditInfoView, SpouseDeleteView, WeddingPageView, AddWeddingInfoView, WeddingInfoView, EditWeddingInfoView, WeddingDeleteView, GuestPageView, GuestInfoView, GuestAddInfoView, GuestDeleteInfoView, GuestEditInfoView, GuestCoupleInfoView, GuestWeddingInfoView, CoupleRegisterView\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^hello/', HelloView.as_view(), name='hello'),\n url(r'^couple_login/', CoupleLoginView.as_view(), name='couple-login'),\n url(r'^couple_register', CoupleRegisterView.as_view(), name='couple-register'),\n url(r'^guest_login/', GuestLoginView.as_view(), name='guest-login'),\n url(r'^guest_register/', GuestRegisterView.as_view(), name='guest-register'),\n url(r'^couple_page', CouplePageView.as_view(), name='couple-page'),\n url(r'^couple_logut/', CoupleLogoutView.as_view(), name='couple-logout'),\n url(r'^guest_logout/', GuestLogoutView.as_view(), name='guest-logout'),\n url(r'^spouse_page', SpousePageView.as_view()),\n url(r'^spouse_add_info', SpouseAddInfoView.as_view(), name='spouse-add-info'),\n url(r'^spouse_info', SpouseInfoView.as_view(), name='spouse-info'),\n url(r'^spouse_edit_info/(?P(\\d)+)$', SpouseEditInfoView.as_view(), name='spouse-edit-info'),\n url(r'^spouse_delete/(?P(\\d)+)$', SpouseDeleteView.as_view(), name='spouse-delete'),\n url(r'^wedding_page/', WeddingPageView.as_view(), name='wedding-page'),\n url(r'^wedding_add_info', AddWeddingInfoView.as_view(), name='add-wedding-info'),\n url(r'^wedding_info', WeddingInfoView.as_view(), name='wedding-info'),\n url(r'^wedding_edit_info/(?P(\\d)+)$', EditWeddingInfoView.as_view(), name='wedding-edit-info'),\n url(r'^wedding_delete/(?P(\\d)+)$', WeddingDeleteView.as_view(), name='wedding-delete'),\n url(r'^guest_page/(?P(\\d)+)$', GuestPageView.as_view(), name='guest-page'),\n url(r'^guest_info/(?P(\\d)+)$', GuestInfoView.as_view(), name='guest-info'),\n url(r'^guest_add_info/(?P(\\d)+)$', GuestAddInfoView.as_view(), name='guest-add-info'),\n url(r'^guest_delete_info/(?P(\\d)+)$', GuestDeleteInfoView.as_view(), name='guest-delete-info'),\n url(r'^guest_edit_info/(?P(\\d)+)$', GuestEditInfoView.as_view(), name='guest-edit-info'),\n url(r'^guest_couple_info', GuestCoupleInfoView.as_view(), name='guest-couple-info'),\n url(r'^guest_wedding_info', GuestWeddingInfoView.as_view(), name='guest-wedding-info')\n\n]\n", "sub_path": "wedding/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "guests.views.HelloView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "guests.views.HelloView", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "guests.views.CoupleLoginView.as_view", "line_number": 23, "usage_type": 
"call"}, {"api_name": "guests.views.CoupleLoginView", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "guests.views.CoupleRegisterView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "guests.views.CoupleRegisterView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "guests.views.GuestLoginView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "guests.views.GuestLoginView", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "guests.views.GuestRegisterView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "guests.views.GuestRegisterView", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "guests.views.CouplePageView.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "guests.views.CouplePageView", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "guests.views.CoupleLogoutView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "guests.views.CoupleLogoutView", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "guests.views.GuestLogoutView.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "guests.views.GuestLogoutView", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "guests.views.SpousePageView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "guests.views.SpousePageView", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "guests.views.SpouseAddInfoView.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "guests.views.SpouseAddInfoView", "line_number": 31, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "guests.views.SpouseInfoView.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "guests.views.SpouseInfoView", "line_number": 32, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "guests.views.SpouseEditInfoView.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "guests.views.SpouseEditInfoView", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "guests.views.SpouseDeleteView.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "guests.views.SpouseDeleteView", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "guests.views.WeddingPageView.as_view", "line_number": 35, "usage_type": "call"}, {"api_name": "guests.views.WeddingPageView", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "guests.views.AddWeddingInfoView.as_view", "line_number": 36, "usage_type": "call"}, {"api_name": "guests.views.AddWeddingInfoView", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.urls.url", 
"line_number": 37, "usage_type": "call"}, {"api_name": "guests.views.WeddingInfoView.as_view", "line_number": 37, "usage_type": "call"}, {"api_name": "guests.views.WeddingInfoView", "line_number": 37, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "guests.views.EditWeddingInfoView.as_view", "line_number": 38, "usage_type": "call"}, {"api_name": "guests.views.EditWeddingInfoView", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "guests.views.WeddingDeleteView.as_view", "line_number": 39, "usage_type": "call"}, {"api_name": "guests.views.WeddingDeleteView", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "guests.views.GuestPageView.as_view", "line_number": 40, "usage_type": "call"}, {"api_name": "guests.views.GuestPageView", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "guests.views.GuestInfoView.as_view", "line_number": 41, "usage_type": "call"}, {"api_name": "guests.views.GuestInfoView", "line_number": 41, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 42, "usage_type": "call"}, {"api_name": "guests.views.GuestAddInfoView.as_view", "line_number": 42, "usage_type": "call"}, {"api_name": "guests.views.GuestAddInfoView", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "guests.views.GuestDeleteInfoView.as_view", "line_number": 43, "usage_type": "call"}, {"api_name": "guests.views.GuestDeleteInfoView", "line_number": 43, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 44, "usage_type": "call"}, {"api_name": "guests.views.GuestEditInfoView.as_view", "line_number": 44, "usage_type": "call"}, {"api_name": "guests.views.GuestEditInfoView", "line_number": 44, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, {"api_name": "guests.views.GuestCoupleInfoView.as_view", "line_number": 45, "usage_type": "call"}, {"api_name": "guests.views.GuestCoupleInfoView", "line_number": 45, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 46, "usage_type": "call"}, {"api_name": "guests.views.GuestWeddingInfoView.as_view", "line_number": 46, "usage_type": "call"}, {"api_name": "guests.views.GuestWeddingInfoView", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "338436184", "text": "import numpy as np\r\nfrom nltk import TweetTokenizer, accuracy\r\nfrom nltk.stem.snowball import EnglishStemmer\r\nfrom sklearn import tree\r\nfrom sklearn.cross_validation import StratifiedKFold\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\n\r\nfrom sentiment_util import load_datasets\r\n\r\n\r\ndef main():\r\n # x, y = load_dataset(\"datasets/sentiment_uci/yelp_labelled.txt\")\r\n x, y = load_datasets([\"../datasets/sentiment_uci/yelp_labelled.txt\"])\r\n\r\n stopwords = set()\r\n with open('../stopwords.txt', 'r') as f:\r\n for w in f:\r\n stopwords.add(w)\r\n\r\n tok = TweetTokenizer()\r\n stemmer = EnglishStemmer()\r\n vectorizer = TfidfVectorizer(sublinear_tf=True, use_idf=True, binary=True, 
preprocessor=stemmer.stem,\r\n tokenizer=tok.tokenize, ngram_range=(1, 2))\r\n\r\n accu_p = np.zeros(shape=(2,))\r\n accu_r = np.zeros(shape=(2,))\r\n accu_f = np.zeros(shape=(2,))\r\n accu_a = 0.0\r\n folds = 10\r\n for train_idx, test_idx in StratifiedKFold(y=y, n_folds=folds, shuffle=True):\r\n train_x, train_y = x[train_idx], y[train_idx]\r\n test_x, test_y = x[test_idx], y[test_idx]\r\n\r\n cls = tree.DecisionTreeClassifier()\r\n\r\n # train\r\n train_x = vectorizer.fit_transform(train_x).toarray()\r\n\r\n cls.fit(train_x, train_y)\r\n\r\n # test\r\n test_x = vectorizer.transform(test_x).toarray()\r\n\r\n pred_y = cls.predict(test_x)\r\n\r\n # evaluate\r\n p, r, f, _ = precision_recall_fscore_support(test_y, pred_y)\r\n a = accuracy_score(test_y, pred_y)\r\n accu_p += p\r\n accu_r += r\r\n accu_f += f\r\n accu_a += a\r\n\r\n print(\"Evaluating classifier:\")\r\n print(\"\\tAccuracy: {}\".format(a))\r\n print(\"\\tPrecision[0]: {}\".format(p[0]))\r\n print(\"\\tPrecision[1]: {}\".format(p[1]))\r\n print(\"\\tRecall[0]: {}\".format(r[0]))\r\n print(\"\\tRecall[1]: {}\".format(r[1]))\r\n print(\"\\tF1-score[0]: {}\".format(f[0]))\r\n print(\"\\tF1-score[1]: {}\".format(f[1]))\r\n\r\n print(\"Average evaluation\")\r\n print(\"\\tAccuracy: {}\".format(accu_a / folds))\r\n print(\"\\tPrecision[0]: {}\".format(accu_p[0] / folds))\r\n print(\"\\tPrecision[1]: {}\".format(accu_p[1] / folds))\r\n print(\"\\tRecall[0]: {}\".format(accu_r[0] / folds))\r\n print(\"\\tRecall[1]: {}\".format(accu_r[1] / folds))\r\n print(\"\\tF1-score[0]: {}\".format(accu_f[0] / folds))\r\n print(\"\\tF1-score[1]: {}\".format(accu_f[1] / folds))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "yelp-sentiment/experiments/sentiment_decisiontree.py", "file_name": "sentiment_decisiontree.py", "file_ext": "py", "file_size_in_byte": 2667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sentiment_util.load_datasets", "line_number": 16, "usage_type": "call"}, {"api_name": "nltk.TweetTokenizer", "line_number": 23, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.EnglishStemmer", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 37, "usage_type": "name"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "218149413", "text": "#!coding=utf-8\nimport requests\n\nfrom api.config.readconfig import ReadConfig\nimport unittest\n\n\nclass test_v13recordDetails(unittest.TestCase):\n def test_LD(self):\n access_token=ReadConfig().get_value(\"params\",\"access_token\")\n ip = ReadConfig().get_value(\"url\",\"ip\")\n adi = \"v1.3/conference/{cid}/handup/cancel/{puuid}\"\n url = \"/\".join((ip , adi))\n header = {\n \"Authorization\": (\"Bearer\" + \" \" + access_token),\n \"Content-Type\":\"application/x-www-form-urlencoded\"\n }\n\n\n r = 
requests.post(url,headers = header,verify = False)\n        print(r.text)\n        self.assertEqual(200,r.status_code)\n        # assert \"该视频\" in r.text\n\n\nif __name__ == \"__main__\":\n    unittest.main()", "sub_path": "app_api/alltestcase/Double_check/test_Confer_CancelHandup.py", "file_name": "test_Confer_CancelHandup.py", "file_ext": "py", "file_size_in_byte": 768, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "api.config.readconfig.ReadConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "api.config.readconfig.ReadConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 20, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "541828817", "text": "from paho.mqtt import client as mqtt_client\r\nfrom datetime import datetime\r\n\r\nfrom importlib import import_module\r\nmain = import_module(\"main\")\r\n\r\ndef connect_mqtt():\r\n\r\n    def on_connect(client, userdata, flags, rc):\r\n        if rc == 0:\r\n            log(\"Connected to MQTT Broker!\")\r\n        else:\r\n            log(\"Failed to connect, return code %d\\n\" % rc)\r\n\r\n    # Set Connecting Client ID\r\n    client = mqtt_client.Client(main.client_id)\r\n    if main.username != \"\" and main.password != \"\":\r\n        client.username_pw_set(main.username, main.password)\r\n    client.on_connect = on_connect\r\n    client.connect(main.broker, main.port)\r\n    return client\r\n\r\ndef publish(client, topic, msg):\r\n    prefix = main.prefix\r\n    msg_count = 0\r\n    result = client.publish(f'{prefix}/{topic}', str(msg), qos=main.qos, retain=int(main.retain))\r\n    status = result[0]\r\n    if status == 0:\r\n        log(f\" MQTT Send : {prefix}/{topic} => {msg}\")\r\n    else:\r\n        log(f\" - Failed to send message to topic {prefix}/{topic}\")\r\n    msg_count += 1\r\n\r\ndef log(msg):\r\n    now = datetime.now()\r\n    print(f\"{now} : {msg}\")\r\n\r\ndef splitLog(msg):\r\n    format_log = \"\"\r\n    i = 1\r\n    nb_col = 12\r\n    msg_length = len(msg)\r\n    cur_length = 1\r\n    for log_msg in msg:\r\n        format_log += f\" | {log_msg}\"\r\n        if i == nb_col:\r\n            i = 1\r\n            format_log += f\" |\"\r\n            log(format_log)\r\n            format_log = \"\"\r\n        elif cur_length == msg_length:\r\n            format_log += f\" |\"\r\n            log(format_log)\r\n        else:\r\n            i = i + 1\r\n        cur_length = cur_length + 1", "sub_path": "app/function.py", "file_name": "function.py", "file_ext": "py", "file_size_in_byte": 1604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "importlib.import_module", "line_number": 5, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 16, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "429997620", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'此模块功能:利用标准库asyncio实现异步IO'\n\n\n__author__ = 'HouBin'\n\nimport threading\nimport asyncio\n\n#通过async把一个生成器标记为协程类型\nasync def hello(name):\n    print('Hello, %s! (%s)' % (name, threading.currentThread()))\n    await asyncio.sleep(1)\n    print('See you again, %s! 
(%s)' % (name, threading.currentThread()))\n\nloop = asyncio.get_event_loop()\ntasks = [hello(\"Zhangsan\"), hello(\"Lisi\"), hello(\"Wangwu\")]\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n", "sub_path": "10-Async/AsyncHello1.py", "file_name": "AsyncHello1.py", "file_ext": "py", "file_size_in_byte": 554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "threading.currentThread", "line_number": 14, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "threading.currentThread", "line_number": 16, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 18, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "279175391", "text": "from ckeditor_uploader.widgets import CKEditorUploadingWidget\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import BLANK_CHOICE_DASH\nfrom django.utils.translation import gettext\n\nfrom grandchallenge.core.urlresolvers import reverse\nfrom grandchallenge.pages.models import Page\n\n\nclass PageCreateForm(forms.ModelForm):\n html = forms.CharField(widget=CKEditorUploadingWidget())\n\n def __init__(self, *args, **kwargs):\n self.challenge = kwargs.pop(\"challenge\", None)\n super().__init__(*args, **kwargs)\n if self.challenge is not None and \"html\" in self.fields:\n self.fields[\"html\"].widget.config.update(\n {\n \"filebrowserUploadUrl\": reverse(\n \"uploads:ck-create\",\n kwargs={\n \"challenge_short_name\": self.challenge.short_name\n },\n ),\n \"filebrowserBrowseUrl\": reverse(\n \"uploads:ck-browse\",\n kwargs={\n \"challenge_short_name\": self.challenge.short_name\n },\n ),\n }\n )\n\n if self.challenge.allow_unfiltered_page_html:\n self.fields[\"html\"].widget.config.update(\n {\"allowedContent\": True}\n )\n\n self.helper = FormHelper(self)\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n def clean_title(self):\n \"\"\" Ensure that page titles are not duplicated for a challenge \"\"\"\n title = self.cleaned_data[\"title\"]\n queryset = Page.objects.filter(\n challenge=self.challenge, title__iexact=title\n )\n\n if self.instance is not None:\n queryset = queryset.exclude(pk=self.instance.pk)\n\n if queryset.exists():\n raise ValidationError(\n gettext(\n \"A page with that title already exists for this challenge\"\n ),\n code=\"duplicate\",\n )\n\n return title\n\n class Meta:\n model = Page\n fields = (\"title\", \"permission_lvl\", \"display_title\", \"hidden\", \"html\")\n\n\nclass PageUpdateForm(PageCreateForm):\n \"\"\" Like the page update form but you can also move the page \"\"\"\n\n move = forms.CharField(widget=forms.Select)\n move.required = False\n move.widget.choices = (\n (BLANK_CHOICE_DASH[0]),\n (Page.FIRST, \"First\"),\n (Page.UP, \"Up\"),\n (Page.DOWN, \"Down\"),\n (Page.LAST, \"Last\"),\n )\n", "sub_path": "app/grandchallenge/pages/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.forms.ModelForm", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": 
"django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "ckeditor_uploader.widgets.CKEditorUploadingWidget", "line_number": 14, "usage_type": "call"}, {"api_name": "grandchallenge.core.urlresolvers.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "grandchallenge.core.urlresolvers.reverse", "line_number": 28, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 42, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 43, "usage_type": "call"}, {"api_name": "grandchallenge.pages.models.Page.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "grandchallenge.pages.models.Page.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "grandchallenge.pages.models.Page", "line_number": 48, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 57, "usage_type": "call"}, {"api_name": "grandchallenge.pages.models.Page", "line_number": 66, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 73, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.db.models.BLANK_CHOICE_DASH", "line_number": 76, "usage_type": "name"}, {"api_name": "grandchallenge.pages.models.Page.FIRST", "line_number": 77, "usage_type": "attribute"}, {"api_name": "grandchallenge.pages.models.Page", "line_number": 77, "usage_type": "name"}, {"api_name": "grandchallenge.pages.models.Page.UP", "line_number": 78, "usage_type": "attribute"}, {"api_name": "grandchallenge.pages.models.Page", "line_number": 78, "usage_type": "name"}, {"api_name": "grandchallenge.pages.models.Page.DOWN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "grandchallenge.pages.models.Page", "line_number": 79, "usage_type": "name"}, {"api_name": "grandchallenge.pages.models.Page.LAST", "line_number": 80, "usage_type": "attribute"}, {"api_name": "grandchallenge.pages.models.Page", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "356968056", "text": "import os\nimport config as cfg\nimport xml.etree.ElementTree as ET\n\n'''\n更改大小写\n应用于xml文件中的指定字段\n'''\n\nsource = os.path.join(cfg.DATA_PATH, 'Annotations')\n\ncount = 0\n\nfor file in sorted(os.listdir(source)):\n xml_file = os.path.join(source, file)\n tree = ET.parse(xml_file)\n obj = tree.find('object')\n name = obj.find('name')\n na = name.text\n if not na.islower() and len(na) == 1:\n name.text = na.lower()\n tree.write(xml_file)\n print('file %s change %s to %s' % (xml_file, na, name.text))\n count += 1\n\nprint(count)\n", "sub_path": "tools/change_upper_or_lower.py", "file_name": "change_upper_or_lower.py", "file_ext": "py", "file_size_in_byte": 586, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.DATA_PATH", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 16, "usage_type": "call"}, 
{"api_name": "xml.etree.ElementTree", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "406823948", "text": "#-----------------------------\n# Cloud Computing\n# Projet 1 Ex 4\n# SOETENS Gatien BRUFAU Thomas\n#-----------------------------\n\n# Import des modules\nimport pymongo\nfrom pymongo import MongoClient\n\n# Connexion au cluster\ncluster=MongoClient(\"mongodb+srv://Gatens:test1234@cluster0.sppia.mongodb.net/bycicle_services?retryWrites=true&w=majority\")\ndb=cluster[\"bycicle_services\"]\ncollection=db[\"Lille\"]\n\n#recherche une station par un nom (quelques lettres)\ndef findStation(recherche):\n result = collection.find({\"fields.nom\":{\"$regex\": recherche, \"$options\":'i'}})\n for i in result:\n print(i)\n\n#findStation(\"militaire\")\n\n# Update a station\ndef update(station):\n collection.update_one(\n {\"fields.nom\":station},\n {'$set': {'fields.etat':'HORS SERVICE'}} # passe l'etat de la station a hors service\n )\n\n#update(\"N.D. DE LA TREILLE\")\n# Remove station and data\ndef remove(station):\n query={\"fields.nom\":station}\n collection.delete_one(query)\n\n#remove(\"N.D. DE LA TREILLE\")\n\n# Deactivate all stations in an area\ndef deactivate(): #put stations around a polygon in state : HORS SERVICE\n result = collection.find({\"geometry\": { \n \"$near\" : {\n \"$geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\":[ 3.0629778380918538 ,50.64105303104528]\n },\n \"$maxDistance\": 300,\n \"$minDistance\": 0\n}\n}})\n for i in result:\n update(i[\"fields\"][\"nom\"])\n print(i)\n\ndeactivate()", "sub_path": "exercice4.py", "file_name": "exercice4.py", "file_ext": "py", "file_size_in_byte": 1458, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pymongo.MongoClient", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "26558408", "text": "from django.urls import path\n\nfrom blog.views import PostListView, PostCreationView, CommentCreateView, CommentListView, PostDetailView, CreateCommentsView\n\napp_name = 'blog-api'\n\nurlpatterns = [\n path('postlist/', PostListView.as_view(), name='posts-list'),\n path('create/', PostCreationView.as_view(), name=\"post-creation\"),\n path('comments//', CommentListView.as_view(), name=\"post-comment\"),\n path('posts//', PostDetailView.as_view(), name='post-detail'),\n path('comment/create/', CreateCommentsView.as_view(), name='comment-create'),\n]", "sub_path": "blog/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "blog.views.PostListView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "blog.views.PostListView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "blog.views.PostCreationView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "blog.views.PostCreationView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "blog.views.CommentListView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "blog.views.CommentListView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "blog.views.PostDetailView.as_view", "line_number": 11, 
"usage_type": "call"}, {"api_name": "blog.views.PostDetailView", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "blog.views.CreateCommentsView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "blog.views.CreateCommentsView", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "20794167", "text": "import sys\n\nfrom setuptools import setup, find_packages\nfrom os.path import dirname, join\n\n# this smells, but I don't know how to do better than this right now.\nVERSION=open(join(dirname(__file__), \"version.txt\")).read().strip()\n\ninstall_requires = [\n 'Click==3.3',\n 'unittest2 == 0.8.0',\n 'setuptools == 12.0.5'\n]\n\nif sys.version_info[1] == 6:\n install_requires.append(\"importlib\")\n\nsetup(\n name='docker-rpm-builder',\n description=\"Build native RPMs through docker\",\n long_description=open(\"README.md\").read(),\n author=\"Alan Franzoni\",\n author_email=\"username@franzoni.eu\",\n url=\"https://github.com/alanfranz/docker-rpm-builder\",\n version=VERSION,\n packages=find_packages(),\n install_requires=install_requires,\n entry_points='''\n [console_scripts]\n docker-rpm-builder=drb.cmdline:cmdline\n ''',\n setup_requires = [\"setuptools_git == 1.1\" ],\n license=\"Apache-2.0\",\n include_package_data=True,\n zip_safe=False\n\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 982, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 15, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 18, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "159314506", "text": "#\n# Copyright 2012 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytz\n\nfrom unittest import TestCase\nfrom itertools import chain, izip_longest\nfrom datetime import datetime, timedelta\nfrom collections import deque\n\nfrom zipline import ndict\nfrom zipline.gens.sort import (\n date_sort,\n done,\n queue_is_done\n)\nfrom zipline.gens.utils import alternate, done_message\nfrom zipline.sources import SpecificEquityTrades\nfrom zipline.gens.composites import date_sorted_sources\n\n\nclass HelperTestCase(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_individual_queue_logic(self):\n queue = deque()\n # Empty queues are neither done nor ready.\n assert not queue_is_done(queue)\n\n queue.append(to_dt('foo'))\n assert not queue_is_done(queue)\n\n queue.appendleft(to_dt('DONE'))\n\n # Checking done when we have a message after done will trip an assert.\n self.assertRaises(AssertionError, queue_is_done, queue)\n\n queue.pop()\n assert queue_is_done(queue)\n\n def 
test_pop_logic(self):\n sources = {}\n ids = ['a', 'b', 'c']\n for id in ids:\n sources[id] = deque()\n\n assert not done(sources)\n\n # All sources must have a message to be ready/done\n sources['a'].append(to_dt(\"datetime\"))\n assert not done(sources)\n sources['a'].pop()\n\n for id in ids:\n sources[id].append(to_dt(\"datetime\"))\n\n assert not done(sources)\n\n for id in ids:\n sources[id].appendleft(to_dt(\"DONE\"))\n\n # [\"DONE\", message] will trip an assert in queue_is_done.\n self.assertRaises(AssertionError, done, sources)\n\n for id in ids:\n sources[id].pop()\n\n assert done(sources)\n\n\nclass DateSortTestCase(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def run_date_sort(self, event_stream, expected, source_ids):\n \"\"\"\n Take a list of events, their source_ids, and an expected sorting.\n Assert that date_sort's output agrees with expected.\n \"\"\"\n sort_out = date_sort(event_stream, source_ids)\n for m1, m2 in izip_longest(sort_out, expected):\n assert m1 == m2\n\n def test_single_source(self):\n\n # Just using the built-in defaults. See\n # zipline.sources.py\n source = SpecificEquityTrades()\n expected = list(source)\n source.rewind()\n # The raw source doesn't handle done messaging, so we need to\n # append a done message for sort to work properly.\n with_done = chain(source, [done_message(source.get_hash())])\n self.run_date_sort(with_done, expected, [source.get_hash()])\n\n def test_multi_source(self):\n\n filter = [2, 3]\n args_a = tuple()\n kwargs_a = {\n 'count': 100,\n 'sids': [1, 2, 3],\n 'start': datetime(2012, 1, 3, 15, tzinfo=pytz.utc),\n 'delta': timedelta(minutes=6),\n 'filter': filter\n }\n source_a = SpecificEquityTrades(*args_a, **kwargs_a)\n\n args_b = tuple()\n kwargs_b = {\n 'count': 100,\n 'sids': [2, 3, 4],\n 'start': datetime(2012, 1, 3, 15, tzinfo=pytz.utc),\n 'delta': timedelta(minutes=5),\n 'filter': filter\n }\n source_b = SpecificEquityTrades(*args_b, **kwargs_b)\n\n all_events = list(chain(source_a, source_b))\n\n # The expected output is all events, sorted by dt with\n # source_id as a tiebreaker.\n expected = sorted(all_events, comp)\n source_ids = [source_a.get_hash(), source_b.get_hash()]\n\n # Generating the events list consumes the sources. Rewind them\n # for testing.\n source_a.rewind()\n source_b.rewind()\n\n # Append a done message to each source.\n with_done_a = chain(source_a, [done_message(source_a.get_hash())])\n with_done_b = chain(source_b, [done_message(source_b.get_hash())])\n\n interleaved = alternate(with_done_a, with_done_b)\n\n # Test sort with alternating messages from source_a and\n # source_b.\n self.run_date_sort(interleaved, expected, source_ids)\n\n source_a.rewind()\n source_b.rewind()\n with_done_a = chain(source_a, [done_message(source_a.get_hash())])\n with_done_b = chain(source_b, [done_message(source_b.get_hash())])\n\n sequential = chain(with_done_a, with_done_b)\n\n # Test sort with all messages from a, followed by all messages\n # from b.\n\n self.run_date_sort(sequential, expected, source_ids)\n\n def test_sort_composite(self):\n\n filter = [1, 2]\n\n #Set up source a. One hour between events.\n args_a = tuple()\n kwargs_a = {\n 'count': 100,\n 'sids': [1],\n 'start': datetime(2012, 6, 6, 0),\n 'delta': timedelta(hours=1),\n 'filter': filter\n }\n source_a = SpecificEquityTrades(*args_a, **kwargs_a)\n\n #Set up source b. 
One day between events.\n args_b = tuple()\n kwargs_b = {\n 'count': 50,\n 'sids': [2],\n 'start': datetime(2012, 6, 6, 0),\n 'delta': timedelta(days=1),\n 'filter': filter\n }\n source_b = SpecificEquityTrades(*args_b, **kwargs_b)\n\n #Set up source c. One minute between events.\n args_c = tuple()\n kwargs_c = {\n 'count': 150,\n 'sids': [1, 2],\n 'start': datetime(2012, 6, 6, 0),\n 'delta': timedelta(minutes=1),\n 'filter': filter\n }\n source_c = SpecificEquityTrades(*args_c, **kwargs_c)\n # Set up source d. This should produce no events because the\n # internal sids don't match the filter.\n args_d = tuple()\n kwargs_d = {\n 'count': 50,\n 'sids': [3],\n 'start': datetime(2012, 6, 6, 0),\n 'delta': timedelta(minutes=1),\n 'filter': filter\n }\n source_d = SpecificEquityTrades(*args_d, **kwargs_d)\n sources = [source_a, source_b, source_c, source_d]\n hashes = [source.get_hash() for source in sources]\n\n sort_out = date_sorted_sources(*sources)\n\n # Read all the values from sort and assert that they arrive in\n # the correct sorting with the expected hash values.\n to_list = list(sort_out)\n copy = to_list[:]\n\n # We should have 300 events (100 from a, 150 from b, 50 from c)\n assert len(to_list) == 300\n\n for e in to_list:\n # All events should match one of our expected source_ids.\n assert e.source_id in hashes\n # But none of them should match source_d.\n assert e.source_id != source_d.get_hash()\n\n # The events should be sorted by dt, with source_id as tiebreaker.\n expected = sorted(copy, comp)\n\n assert to_list == expected\n\n\ndef compare_by_dt_source_id(x, y):\n if x.dt < y.dt:\n return -1\n elif x.dt > y.dt:\n return 1\n\n elif x.source_id < y.source_id:\n return -1\n elif x.source_id > y.source_id:\n return 1\n else:\n return 0\n\n#Alias for ease of use\ncomp = compare_by_dt_source_id\n\n\ndef to_dt(msg):\n return ndict({'dt': msg})\n", "sub_path": "tests/test_sorting.py", "file_name": "test_sorting.py", "file_ext": "py", "file_size_in_byte": 7862, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest.TestCase", "line_number": 34, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 43, "usage_type": "call"}, {"api_name": "zipline.gens.sort.queue_is_done", "line_number": 45, "usage_type": "call"}, {"api_name": "zipline.gens.sort.queue_is_done", "line_number": 48, "usage_type": "call"}, {"api_name": "zipline.gens.sort.queue_is_done", "line_number": 53, "usage_type": "argument"}, {"api_name": "zipline.gens.sort.queue_is_done", "line_number": 56, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 62, "usage_type": "call"}, {"api_name": "zipline.gens.sort.done", "line_number": 64, "usage_type": "call"}, {"api_name": "zipline.gens.sort.done", "line_number": 68, "usage_type": "call"}, {"api_name": "zipline.gens.sort.done", "line_number": 74, "usage_type": "call"}, {"api_name": "zipline.gens.sort.done", "line_number": 80, "usage_type": "argument"}, {"api_name": "zipline.gens.sort.done", "line_number": 85, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 88, "usage_type": "name"}, {"api_name": "zipline.gens.sort.date_sort", "line_number": 101, "usage_type": "call"}, {"api_name": "itertools.izip_longest", "line_number": 102, "usage_type": "call"}, {"api_name": "zipline.sources.SpecificEquityTrades", "line_number": 109, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 114, "usage_type": "call"}, {"api_name": 
"zipline.gens.utils.done_message", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 124, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 124, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 125, "usage_type": "call"}, {"api_name": "zipline.sources.SpecificEquityTrades", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 134, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 134, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 135, "usage_type": "call"}, {"api_name": "zipline.sources.SpecificEquityTrades", "line_number": 138, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 140, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 153, "usage_type": "call"}, {"api_name": "zipline.gens.utils.done_message", "line_number": 153, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 154, "usage_type": "call"}, {"api_name": "zipline.gens.utils.done_message", "line_number": 154, "usage_type": "call"}, {"api_name": "zipline.gens.utils.alternate", "line_number": 156, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 164, "usage_type": "call"}, {"api_name": "zipline.gens.utils.done_message", "line_number": 164, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 165, "usage_type": "call"}, {"api_name": "zipline.gens.utils.done_message", "line_number": 165, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 167, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 184, "usage_type": "call"}, {"api_name": "zipline.sources.SpecificEquityTrades", "line_number": 187, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 195, "usage_type": "call"}, {"api_name": "zipline.sources.SpecificEquityTrades", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 205, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 206, "usage_type": "call"}, {"api_name": "zipline.sources.SpecificEquityTrades", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 216, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 217, "usage_type": "call"}, {"api_name": "zipline.sources.SpecificEquityTrades", "line_number": 220, "usage_type": "call"}, {"api_name": "zipline.gens.composites.date_sorted_sources", "line_number": 224, "usage_type": "call"}, {"api_name": "zipline.ndict", "line_number": 264, "usage_type": "call"}]} +{"seq_id": "588895724", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 12:50:59 2020\n\n@author: u301023\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tables\nimport pandas as pd\n\ntry:\n plt.close('all')\nexcept:\n pass\n\nF=20\nh=1\nc=1\nb=1\nK=8\nJ=6\n\nnt = 10 # number of time steps\ndt = [1e-4,2e-4,4e-4,8e-4,1.6e-3,3.2e-3,6.4e-3,1.28e-2,2.56e-2] # vector of different time steps\nTU = 1 # Time unit\nwpt = 100 # how many timesteps per time unit will be saved\n\ntitle1 = \"Lorenz96_XMode\"\ntitle2 = \"Lorenz96_YMode\"\n\n\n### choose scheme\nscheme = \"RK\" # EF:Euler Forward, RK:Runge Kutta, RRK:Reduced Runge Kutta\n\n# fX = 
tables.open_file(f'data/TS/{title1}_{scheme}_TS_1.h5', mode='r')\n# fY = tables.open_file(f'data/TS/{title2}_{scheme}_TS_1.h5', mode='r')\n\n# X = fX.root.array_X\n# Y = fY.root.array_Y\n\n# x0 = X[:]\n# y0 = Y[:]\n\n# fX.close()\n# fY.close()\n\nerror = np.zeros((nt-1,2))\n\nx = []\ny = []\n\nfor i in range(0,nt-1):\n fX = tables.open_file(f'data/TS/{title1}_{scheme}_TS_{i+1}.h5', mode='r')\n fY = tables.open_file(f'data/TS/{title2}_{scheme}_TS_{i+1}.h5', mode='r')\n\n X = fX.root.array_X\n Y = fY.root.array_Y\n \n x.append(X[:])\n y.append(Y[:])\n error[i,0] = np.mean(abs(x[i][:195,:]-x[0][:195,:]))\n error[i,1] = np.mean(abs(y[i][:195,:]-y[0][:195,:]))\n \n fX.close()\n fY.close()\n\nplt.figure()\nplt.loglog(dt,error[:,0])\n\nplt.figure()\nplt.loglog(dt,error[:,1])", "sub_path": "Lorenz96_plot_loop_init.py", "file_name": "Lorenz96_plot_loop_init.py", "file_ext": "py", "file_size_in_byte": 1516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "tables.open_file", "line_number": 56, "usage_type": "call"}, {"api_name": "tables.open_file", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.loglog", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.loglog", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "648245495", "text": "from pg.cmakelists_generator_root import CMakelistsGeneratorRoot\n\nroot = CMakelistsGeneratorRoot()\nroot.cmakeMinimumVersion = \"3.20\"\nroot.projectDescription = \"Game Engine\"\nroot.projectName = \"Zinet\"\nroot.projectVersion = \"0.1.2\"\nroot.subdirectories = [\"Core\", \"GraphicLayer\", \"EntryPoint\", \"PortCV\"]\nroot.globalCompileOptions = \"/W4 /WX /external:W0 /external:anglebrackets /MP\"\nroot.globalCompileOptionsForAddressSanitizer = \"/fsanitize=address /RTCu /experimental:module-\"\nroot.globalCompileDefinitions = \"ZINET_STATIC ZINET_WINDOWS _DISABLE_VECTOR_ANNOTATION _DISABLE_STRING_ANNOTATION\"\nprojectGenerator.add_generator(root)", "sub_path": "recipe_root.py", "file_name": "recipe_root.py", "file_ext": "py", "file_size_in_byte": 627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pg.cmakelists_generator_root.CMakelistsGeneratorRoot", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "263578502", "text": "\"\"\"\n DataCorrectionTest Unit Tests\n\n Test Premises:\n - saved corrections will notify users\n - corrections will modify scores\n\"\"\"\nfrom django.test import TestCase\nfrom stars.apps.submissions.models import *\n\nimport sys\nfrom datetime import datetime\n\n\nclass DataCorrectionTest(TestCase):\n fixtures = 
['data_correction_test.json',\n                 'notification_emailtemplate_tests.json']\n\n    def setUp(self):\n        pass\n\n    def test_correction(self):\n        \"\"\"\n\n        \"\"\"\n        ss = SubmissionSet.objects.get(pk=1)\n        self.assertEqual(ss.score, 100.0)\n\n        platinum = Rating.objects.get(pk=5)\n        self.assertEqual(ss.rating, platinum)\n\n        cus = CreditUserSubmission.objects.get(pk=1)\n        field = NumericSubmission.objects.get(pk=1)\n        user = User.objects.get(pk=1)\n\n        correction = DataCorrectionRequest(\n            date=datetime.now(),\n            reporting_field=field,\n            new_value=4,\n            explanation='just cuz',\n            user=user,\n            approved=False)\n        correction.save()\n\n        correction.approved = True\n        correction.save()\n\n        ss = SubmissionSet.objects.get(pk=1)\n        self.assertEqual(ss.score, 80.0)\n\n        gold = Rating.objects.get(pk=4)\n        self.assertEqual(ss.rating, gold)\n", "sub_path": "stars/apps/submissions/tests/test_data_corrections.py", "file_name": "test_data_corrections.py", "file_ext": "py", "file_size_in_byte": 1299, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "154030827", "text": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nimport math\n\nclass Pagination:\n    limit = settings.PAGINATION\n\n    def __init__(self, model, page=None, form=None, queryset=None):\n\n        if not page:\n            page = 0\n        else:\n            page = int(page)-1\n\n        limit = self.limit\n        a = limit * page\n        b = a + limit\n\n        data = {}\n        if (form and form.is_valid()):\n            data = form.cleaned_data\n            keys_to_removes = []\n            for key in data:\n                if data[key] == None or data[key] == '':\n                    keys_to_removes.append(key)\n            for key in keys_to_removes:\n                data.pop(key, None)\n\n        if queryset:\n            result = queryset.filter(**data)\n        else:\n            result = model.objects.filter(**data)\n\n        pages = math.ceil(len(result)/limit)\n        result = result[a:b]\n\n        pagination = [page - 2, page - 1, page, page + 1, page + 2]\n        if pagination[0] < 1:\n            diff = 1 - pagination[0]\n            for i in range(len(pagination)):\n                pagination[i] = pagination[i] + diff\n\n        self.pagination = pagination\n        self.pages = pages\n        self.page = page\n        self.query = result\n        self.pagination_data = {'page': page, 'pages': pages, 'pagination': pagination}\n", "sub_path": "core/pagination.py", "file_name": "pagination.py", "file_ext": "py", "file_size_in_byte": 1166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.settings.PAGINATION", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 6, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "504696548", "text": "import cv2\nimport tensorflow as tf\nimport numpy as np\nimport model\n\nimg = cv2.imread('images/10.png')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nblur = cv2.GaussianBlur(gray, (5, 5), 0)\nret, thr = cv2.threshold(blur, 100, 230, cv2.THRESH_BINARY_INV)\n\ncontours, _ = cv2.findContours(thr.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\nrects = [(x, y, w, h) for (x, y, w, h) in [cv2.boundingRect(each) for each in contours] if ((w * h > 15000) and (w * h < 500000))]\n\nimg_result = []\nimg_for_class = img.copy()\n\nmargin_pixel = 60\n\nwith tf.Session() as sess:\n    cnn = model.Model(sess, 
\"model\", \"model_1\")\n\n for rect in rects:\n target_num = img_for_class[rect[1]-margin_pixel : rect[1]+rect[3]+margin_pixel, rect[0]-margin_pixel : rect[0]+rect[2]+margin_pixel]\n test_num = cv2.resize(target_num, (28,28))[:, :, 1]\n test_num = (test_num < 70) * test_num\n test_num = test_num.astype('float32') / 255.\n test_num = test_num.reshape((1, 28, 28, 1))\n predicted_num = cnn.test(test_num)\n \n # Draw the rectangles\n cv2.rectangle(img, (rect[0], rect[1]), \n (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 5) \n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img, str(predicted_num[0]), (rect[0],rect[1]), font, 4, (0,0,255), 10)\n\n img = cv2.resize(img, (1920, 1080))\n cv2.imshow('A', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n", "sub_path": "opencv_mnist/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1459, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 20, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "165961615", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n\nfrom selenium import webdriver\nimport time\nfrom Commonlib.commonlib import Commonshare\n# 导入下拉框select\nfrom selenium.webdriver.support.select import Select\n# 导入键盘操作\nfrom selenium.webdriver.common.keys import Keys\nimport random\n\n\n# 商品基础库(页面内容的搜索)\n\nclass Search_goods(Commonshare):\n def create_goods(self):\n self.driver.implicitly_wait(5)\n # 打开采处方\n self.open_url('http://test2admin.caichufang.com/admin/login.html')\n # 添加token 跳过登录\n # self.driver.add_cookie({'name': 'SESSION', 'value': '0b7b1da4260e3fd9b7890520f035c290|1568253708|1568253627'})\n # self.open_url('https://admin.caichufang.com/admin/index.html')\n self.input_data('id', 'userName', 'ccf_super_admin')\n self.input_data('id', 'userPwd', 'caichufang2017')\n self.input_data('id', 'loginCaptcha', '0809')\n self.click('xpath', '/html/body/div/div[2]/div/form/div[4]/input')\n # 获取商品基础库元素,并进行点击\n self.click('xpath', '/html/body/div/div[2]/div[2]/div[1]/div/div/div[2]/ul/li[1]/div/a/div[2]')\n # 
进入镶嵌页面iframe\n iframe = self.driver.find_element_by_name('inner')\n self.driver.switch_to_frame(iframe)\n # 进入输入框\n self.input_data('name', 'goodsBasicName', '脑络通')\n # 点击查询\n self.click('xpath', '/html/body/div[1]/div/form/div/div/div[2]/div/button[1]')\n # 定位一级分类 二级分类, 分类设置随机数\n a = random.randint(1, 14)\n goods_class_one = self.driver.find_element_by_id('oneClass')\n Select(goods_class_one).select_by_index(a)\n goods_class_one.click()\n goods_class_two = self.driver.find_elements_by_id('twoClass')\n for i in range(len(goods_class_two)):\n two_class = goods_class_two[i]\n print(two_class.text)\n b = random.randint(0, 3)\n Select(two_class).select_by_index(b)\n # 键盘操作\n key = self.driver.find_element_by_name('goodsBasicName')\n key.send_keys(Keys.CONTROL, 'a')\n key.send_keys(Keys.BACKSPACE)\n self.click('xpath', '/html/body/div[1]/div/form/div/div/div[2]/div/button[1]')\n self.click('text', '编辑')\n for i in range(10):\n js = 'window.scrollTo(0,%s)' % (i * 100)\n self.driver.execute_script(js)\n self.click('class', 'btn-primary')\n time.sleep(10)\n\n\nif __name__ == '__main__':\n good = Search_goods()\n good.create_goods()\n", "sub_path": "xiong/采处方/总后台/商品基础库/搜索.py", "file_name": "搜索.py", "file_ext": "py", "file_size_in_byte": 2606, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "Commonlib.commonlib.Commonshare", "line_number": 16, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.select.Select", "line_number": 40, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.select.Select", "line_number": 47, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.CONTROL", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.BACKSPACE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 51, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "433742312", "text": "#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom .dataloader import PytorchOpenVINODataLoader\nfrom .metric import PytorchOpenVINOMetric\nfrom ..core.model import OpenVINOModel\nfrom bigdl.nano.utils.inference.pytorch.model import AcceleratedLightningModule\nfrom .utils import export\nimport torch\nfrom bigdl.nano.utils.log4Error import invalidInputError\n\n\nclass PytorchOpenVINOModel(OpenVINOModel, AcceleratedLightningModule):\n def __init__(self, model, input_sample=None, **export_kwargs):\n \"\"\"\n Create a OpenVINO model from pytorch.\n\n :param model: Pytorch model to be converted to OpenVINO for inference 
or\n a path to a saved OpenVINO model.\n :param input_sample: A set of inputs for tracing; defaults to None if the model\n has been traced before, or if it is a LightningModule\n with a dataloader attached.\n :param **export_kwargs: will be passed to the torch.onnx.export function.\n \"\"\"\n ov_model_path = model\n with TemporaryDirectory() as dir:\n dir = Path(dir)\n if isinstance(model, torch.nn.Module):\n export(model, input_sample, str(dir / 'tmp.xml'), **export_kwargs)\n ov_model_path = dir / 'tmp.xml'\n OpenVINOModel.__init__(self, ov_model_path)\n AcceleratedLightningModule.__init__(self, None)\n\n def on_forward_start(self, inputs):\n if self.ie_network is None:\n invalidInputError(False,\n \"Please create an instance by PytorchOpenVINOModel()\"\n \" or PytorchOpenVINOModel.load()\")\n inputs = self.tensors_to_numpy(inputs)\n return inputs\n\n def on_forward_end(self, outputs):\n outputs = self.numpy_to_tensors(outputs.values())\n return outputs\n\n @property\n def status(self):\n status = super().status\n status.update({\"xml_path\": 'ov_saved_model.xml', \"weight_path\": 'ov_saved_model.bin'})\n return status\n\n @staticmethod\n def _load(path):\n \"\"\"\n Load an OpenVINO model for inference from a directory.\n\n :param path: Path to the model to be loaded.\n :return: PytorchOpenVINOModel model for OpenVINO inference.\n \"\"\"\n status = PytorchOpenVINOModel._load_status(path)\n if status.get('xml_path', None):\n xml_path = Path(status['xml_path'])\n invalidInputError(xml_path.suffix == '.xml',\n \"Path of openvino model must be with '.xml' suffix.\")\n else:\n invalidInputError(False, \"nano_model_meta.yml must specify 'xml_path' for loading.\")\n xml_path = Path(path) / status['xml_path']\n return PytorchOpenVINOModel(xml_path)\n\n def pot(self,\n dataloader,\n metric=None,\n higher_better=True,\n drop_type=\"relative\",\n maximal_drop=0.999,\n max_iter_num=1,\n n_requests=None,\n sample_size=300):\n # convert torch metric/dataloader to openvino format\n if metric:\n metric = PytorchOpenVINOMetric(metric=metric, higher_better=higher_better)\n dataloader = PytorchOpenVINODataLoader(dataloader, collate_fn=self.tensors_to_numpy)\n model = super().pot(dataloader, metric=metric, drop_type=drop_type,\n maximal_drop=maximal_drop, max_iter_num=max_iter_num,\n n_requests=n_requests, sample_size=sample_size)\n return PytorchOpenVINOModel(model)\n", "sub_path": "python/nano/src/bigdl/nano/deps/openvino/pytorch/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 4179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "core.model.OpenVINOModel", "line_number": 27, "usage_type": "name"}, {"api_name": "bigdl.nano.utils.inference.pytorch.model.AcceleratedLightningModule", "line_number": 27, "usage_type": "name"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 40, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "utils.export", "line_number": 43, "usage_type": "call"}, {"api_name": "core.model.OpenVINOModel.__init__", "line_number": 45, "usage_type": "call"}, {"api_name": "core.model.OpenVINOModel", "line_number": 45, "usage_type": "name"}, {"api_name": "bigdl.nano.utils.inference.pytorch.model.AcceleratedLightningModule.__init__", "line_number": 46, "usage_type": "call"}, {"api_name": "bigdl.nano.utils.inference.pytorch.model.AcceleratedLightningModule", 
"line_number": 46, "usage_type": "name"}, {"api_name": "bigdl.nano.utils.log4Error.invalidInputError", "line_number": 50, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "bigdl.nano.utils.log4Error.invalidInputError", "line_number": 77, "usage_type": "call"}, {"api_name": "bigdl.nano.utils.log4Error.invalidInputError", "line_number": 80, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 81, "usage_type": "call"}, {"api_name": "metric.PytorchOpenVINOMetric", "line_number": 95, "usage_type": "call"}, {"api_name": "dataloader.PytorchOpenVINODataLoader", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "597444659", "text": "import os\nfrom setuptools import setup, find_packages\n\n\nif os.path.exists(\"README.md\"):\n with open(\"README.md\", \"rb\") as readme:\n LONG_DESCRIPTION = readme.read().decode(\"utf-8\")\nelse:\n LONG_DESCRIPTION = \"\"\n\n\nsetup(\n name=\"fanboi2\",\n version=\"2019.02\",\n description=\"Board engine behind fanboi.ch\",\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n url=\"https://git.sr.ht/~sirn/fanboi2\",\n author=\"Kridsada Thanabulpong\",\n author_email=\"sirn@ogsite.net\",\n license=\"BSD-3-Clause\",\n classifiers=[\n \"Framework :: Pyramid\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet\",\n ],\n keywords=\"web wsgi bfg pylons pyramid\",\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n \"MarkupSafe\",\n \"alembic >=0.9, <0.10\",\n \"argon2_cffi\",\n \"celery >=4.1, <4.2\",\n \"dogpile.cache >=0.6\",\n \"geoip2\",\n \"gunicorn\",\n \"hiredis >=0.2, <0.3\",\n \"isodate\",\n \"kombu >= 4.3, <4.4\",\n \"lark-parser >=0.6, <0.7\",\n \"misaka\",\n \"passlib\",\n \"psycopg2\",\n \"pyramid >=1.9, <1.10\",\n \"pyramid_debugtoolbar\",\n \"pyramid_mako\",\n \"pyramid_nacl_session\",\n \"pyramid_services\",\n \"pyramid_tm\",\n \"pytz\",\n \"redis >=2.0, <3.0\",\n \"requests\",\n \"sqlalchemy >=1.2, <1.3\",\n \"transaction\",\n \"wtforms >=2.1, <3.0\",\n \"zope.sqlalchemy\",\n ],\n zip_safe=False,\n test_suite=\"fanboi2.tests\",\n extras_require={\n \"dev\": [\"honcho\", \"hupper\", \"pre-commit\"],\n \"test\": [\"nose\", \"coverage\", \"rednose\"],\n \"deploy\": [\"fabric\", \"patchwork\", \"invocations\", \"colorama\"],\n },\n python_requires=\">=3.6\",\n entry_points={\n \"console_scripts\": [\n \"fbctl = fanboi2.cmd.ctl:main\",\n \"fbcelery = fanboi2.cmd.celery:main\",\n \"fbdeploy = fanboi2.cmd.deploy:main\",\n ]\n },\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.exists", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "80622292", "text": "import os\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer\nfrom 
sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import FunctionTransformer, StandardScaler, \\\n OneHotEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import RandomForestRegressor\n\n\ndef _process_students(X):\n \"\"\"Create new features linked to the pupils\"\"\"\n\n # average class size\n X['average_class_size'] = X['Nb élèves'] / X['Nb divisions']\n # percentage of pupils in the general stream\n X['percent_general_stream'] = \\\n X['Nb 6èmes 5èmes 4èmes et 3èmes générales'] / X['Nb élèves']\n # percentage of pupils in an european or international section\n X['percent_euro_int_section'] = \\\n X['Nb 6èmes 5èmes 4èmes et 3èmes générales sections européennes et internationales'] / X['Nb élèves'] # noqa\n # percentage of pupils doing Latin or Greek\n sum_global_5_to_3 = \\\n X['Nb 5èmes'] + X['Nb 4èmes générales'] + X['Nb 3èmes générales']\n X['percent_latin_greek'] = \\\n X['Nb 5èmes 4èmes et 3èmes générales Latin ou Grec'] / sum_global_5_to_3 # noqa\n # percentage of pupils that are in a SEGPA class\n X['percent_segpa'] = X['Nb SEGPA'] / X['Nb élèves']\n\n return np.c_[\n X['average_class_size'].values,\n X['percent_general_stream'].values,\n X['percent_euro_int_section'].values,\n X['percent_latin_greek'].values,\n X['percent_segpa'].values\n ]\n\n\ndef _merge_naive(X):\n\n # read the database with the city information\n filepath = os.path.join(\n os.path.dirname(__file__), 'external_data.csv'\n )\n cities_data = pd.read_csv(filepath, index_col=0)\n # merge the two databases at the city level\n df = pd.merge(\n X, cities_data, left_on='Commune et arrondissement code',\n right_on='insee_code', how='left'\n )\n keep_col_cities = [\n 'population', 'SUPERF', 'med_std_living', 'poverty_rate',\n 'unemployment_rate'\n ]\n # fill na by taking the average value at the departement level\n for col in keep_col_cities:\n if cities_data[col].isna().sum() > 0:\n df[col] = df[['Département code', col]]. \\\n groupby('Département code'). 
\\\n transform(lambda x: x.fillna(x.mean()))\n\n return df[keep_col_cities]\n\n\ndef get_estimator():\n\n students_col = [\n 'Nb élèves', 'Nb divisions', 'Nb 6èmes 5èmes 4èmes et 3èmes générales',\n 'Nb 6èmes 5èmes 4èmes et 3èmes générales sections européennes et internationales', # noqa\n 'Nb 5èmes', 'Nb 4èmes générales', 'Nb 3èmes générales',\n 'Nb 5èmes 4èmes et 3èmes générales Latin ou Grec', 'Nb SEGPA'\n ]\n num_cols = [\n 'Nb élèves', 'Nb 3èmes générales', 'Nb 3èmes générales retardataires',\n \"Nb 6èmes provenant d'une école EP\"\n ]\n cat_cols = [\n 'Appartenance EP', 'Etablissement sensible', 'CATAEU2010',\n 'Situation relative à une zone rurale ou autre'\n ]\n merge_col = [\n 'Commune et arrondissement code', 'Département code'\n ]\n drop_cols = [\n 'Name', 'Coordonnée X', 'Coordonnée Y', 'Commune code', 'City_name',\n 'Commune et arrondissement code', 'Commune et arrondissement nom',\n 'Département nom', 'Académie nom', 'Région nom', 'Région 2016 nom',\n 'Longitude', 'Latitude', 'Position'\n ]\n\n numeric_transformer = Pipeline(steps=[\n ('scale', StandardScaler())\n ])\n categorical_transformer = Pipeline(steps=[\n ('encode', OneHotEncoder(handle_unknown='ignore'))\n ])\n students_transformer = FunctionTransformer(\n _process_students, validate=False\n )\n students_transformer = make_pipeline(\n students_transformer, SimpleImputer(strategy='mean'),\n StandardScaler()\n )\n merge_transformer = FunctionTransformer(_merge_naive, validate=False)\n merge_transformer = make_pipeline(\n merge_transformer, SimpleImputer(strategy='mean')\n )\n\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, num_cols),\n ('cat', categorical_transformer, cat_cols),\n ('students', students_transformer, students_col),\n ('merge', merge_transformer, merge_col),\n ('drop cols', 'drop', drop_cols),\n ], remainder='passthrough') # remainder='drop' or 'passthrough'\n\n regressor = RandomForestRegressor(\n n_estimators=5, max_depth=50, max_features=10\n )\n\n pipeline = Pipeline(steps=[\n ('preprocessing', preprocessor),\n ('classifier', regressor)\n ])\n\n return pipeline\n", "sub_path": "submissions/starting_kit/estimator.py", "file_name": "estimator.py", "file_ext": "py", "file_size_in_byte": 4768, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.c_", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.FunctionTransformer", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 104, "usage_type": "call"}, {"api_name": 
"sklearn.preprocessing.StandardScaler", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.FunctionTransformer", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.compose.ColumnTransformer", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 121, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "223126282", "text": "import SimpleITK as sitk\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pydicom import dcmread\nfrom skimage.transform import rescale\nfrom matplotlib.widgets import Slider\n\ndicomEscan = dcmread('MapaCorre.dcm')\ndicomPlan = dcmread('piramide2.dcm')\n\ndef poner_imagen_en_punto(imgPoner,tamanoGrande,puntoEnGrande,puntoEnPoner):\n resp=np.zeros(tamanoGrande)\n x=imgPoner.shape[0]\n y=imgPoner.shape[1]\n if x%2==0:\n cor=0\n else:\n cor=1\n if y%2==0:\n cory=0\n else:\n cory=1 \n resp[puntoEnGrande[0]-int(x/2)+0:puntoEnGrande[0]+int(x/2)+cor+0,puntoEnGrande[1]-int(y/2)+0:puntoEnGrande[1]+int(x/2)+cory+0]=imgPoner\n return resp\n\n\nreescaldo=np.array(dicomPlan.PixelSpacing)/np.array(dicomEscan.PixelSpacing)\narrayPlan=rescale(dicomPlan.pixel_array,reescaldo,anti_aliasing=False)\ncentroEscan=int(dicomEscan.pixel_array.shape[0]/2),int(dicomEscan.pixel_array.shape[1]/2)\narrayPlanAjus=poner_imagen_en_punto(arrayPlan,dicomEscan.pixel_array.shape,centroEscan,(0,0))\n\narrayPlan=arrayPlanAjus/np.max(arrayPlanAjus)\n\n#arrayPlan=dicomPlan.pixel_array/np.max(dicomPlan.pixel_array)\narrayEscan=dicomEscan.pixel_array*dicomEscan.DoseGridScaling/7.06\n\nfixed_image = sitk.GetImageFromArray(arrayPlan)\nmoving_image = sitk.GetImageFromArray(arrayEscan)\n\n\nalpha=0.5\nfig=plt.figure()\nplt.imshow((1.0-alpha)*arrayPlan+alpha*arrayEscan,cmap=plt.cm.gray)\nax=plt.gca()\n\naxR=fig.add_axes([0.25, .03, 0.50, 0.02])\nalp = Slider(axR, 'Alpha', 0, 1, valinit=0.5, valstep=0.01)\n\ndef update(val):\n iv=alp.val\n ax.clear()\n ax.imshow((1.0-iv)*arrayPlan+iv*arrayEscan,cmap=plt.cm.gray)\n fig.canvas.draw_idle()\nalp.on_changed(update)\n\n\ninitial_transform = sitk.CenteredTransformInitializer(fixed_image, \n moving_image, \n sitk.Euler2DTransform(), \n sitk.CenteredTransformInitializerFilter.GEOMETRY)\n\nmoving_resampled = sitk.Resample(moving_image, fixed_image, initial_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())\n\n\nregistration_method = sitk.ImageRegistrationMethod()\n\n# Similarity metric settings.\nregistration_method.SetMetricAsMeanSquares()\nregistration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\nregistration_method.SetMetricSamplingPercentage(0.01)\n\nregistration_method.SetInterpolator(sitk.sitkLinear)\n\n# Optimizer settings.\nregistration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=40, convergenceMinimumValue=1e-6, convergenceWindowSize=10)\nregistration_method.SetOptimizerScalesFromPhysicalShift()\n\n# Setup for the multi-resolution framework. 
\nregistration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])\nregistration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])\nregistration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()\n\n# Don't optimize in-place, we would possibly like to run this cell multiple times.\nregistration_method.SetInitialTransform(initial_transform, inPlace=False)\n\n# Connect all of the observers so that we can perform plotting during registration.\n#registration_method.AddCommand(sitk.sitkStartEvent, start_plot)\n#registration_method.AddCommand(sitk.sitkEndEvent, end_plot)\n#registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations) \n#registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))\n\nfinal_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat64), \n sitk.Cast(moving_image, sitk.sitkFloat64))\n \nprint('Final metric value: {0}'.format(registration_method.GetMetricValue()))\nprint('Optimizer\\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription())) \n\nmoving_resampled = sitk.Resample(moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())\n\n\narrayEscan2=sitk.GetArrayViewFromImage(moving_resampled)\narrayPlan2=arrayPlan\n\n\nalpha2=0.5\nfig2=plt.figure()\nplt.imshow((1.0-alpha2)*arrayPlan2+alpha2*arrayEscan2,cmap=plt.cm.gray)\nax2=plt.gca()\n\naxR2=fig2.add_axes([0.25, .03, 0.50, 0.02])\nalp2 = Slider(axR2, 'Alpha', 0, 1, valinit=0.5, valstep=0.01)\n\ndef update(val):\n iv2=alp2.val\n ax2.clear()\n ax2.imshow((1.0-iv2)*arrayPlan2+iv2*arrayEscan2,cmap=plt.cm.gray)\n fig2.canvas.draw_idle()\nalp2.on_changed(update) \n \nplt.show()\n", "sub_path": "programa/moduloRegistro.py", "file_name": "moduloRegistro.py", "file_ext": "py", "file_size_in_byte": 4588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pydicom.dcmread", "line_number": 9, "usage_type": "call"}, {"api_name": "pydicom.dcmread", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "skimage.transform.rescale", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 33, "usage_type": "call"}, {"api_name": "SimpleITK.GetImageFromArray", "line_number": 38, "usage_type": "call"}, {"api_name": "SimpleITK.GetImageFromArray", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 44, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 53, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "SimpleITK.CenteredTransformInitializer", "line_number": 58, "usage_type": "call"}, {"api_name": "SimpleITK.Euler2DTransform", 
"line_number": 60, "usage_type": "call"}, {"api_name": "SimpleITK.CenteredTransformInitializerFilter", "line_number": 61, "usage_type": "attribute"}, {"api_name": "SimpleITK.Resample", "line_number": 63, "usage_type": "call"}, {"api_name": "SimpleITK.sitkLinear", "line_number": 63, "usage_type": "attribute"}, {"api_name": "SimpleITK.ImageRegistrationMethod", "line_number": 66, "usage_type": "call"}, {"api_name": "SimpleITK.sitkLinear", "line_number": 73, "usage_type": "attribute"}, {"api_name": "SimpleITK.Cast", "line_number": 93, "usage_type": "call"}, {"api_name": "SimpleITK.sitkFloat64", "line_number": 93, "usage_type": "attribute"}, {"api_name": "SimpleITK.Cast", "line_number": 94, "usage_type": "call"}, {"api_name": "SimpleITK.sitkFloat64", "line_number": 94, "usage_type": "attribute"}, {"api_name": "SimpleITK.Resample", "line_number": 99, "usage_type": "call"}, {"api_name": "SimpleITK.sitkLinear", "line_number": 99, "usage_type": "attribute"}, {"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 108, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 117, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "310115419", "text": "# Bio Bot (Telegram bot for managing the @Bio_Chain_2)\n# Copyright (C) 2019 Hackintosh Five\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n\nimport ast\nimport collections\nimport io\nimport itertools\nimport networkx\nimport pickle\n\n\n\"\"\"Converts the username:bio dict to a tree of users, and optionally a list\"\"\"\n\n\nclass Forest:\n def __init__(self):\n self._instances = {}\n\n def get_roots(self):\n return filter(lambda x: not x.parents, self._instances.values())\n\n def get_nodes(self):\n return self._instances.values()\n\n def get_node(self, username, uid=None, add=True):\n try:\n ret = self._instances[username.lower()]\n assert uid is None or ret.uid == uid or ret.uid is None\n if uid is not None:\n ret.uid = uid\n return ret\n except KeyError:\n if not add:\n raise\n self._instances[username.lower()] = ret = User(self, username)\n ret.uid = uid\n return ret\n\n def get_dict(self):\n ret = {}\n for node in self.get_nodes():\n ret[(node.uid, node.username)] = [child.username for child in node.children]\n return ret\n\n def __getstate__(self):\n return {k: (v.username, v.uid, [child.username for child in v.children], v.extras)\n for k, v in self._instances.items()}\n\n def __setstate__(self, state):\n self._instances = {}\n for k, v in state.items():\n user = self._instances[k] = User(self, v[0])\n user.uid = v[1]\n user.extras = v[3]\n for k, v in state.items():\n for child_username in v[2]:\n self._instances[k].add_child(child_username)\n\n\nclass User:\n def __init__(self, forest, username):\n self.forest = forest\n self.username = username\n self.uid = None # Not used in the actual tree, just used to simplify other code by preserving data\n self.extras = {}\n self.children = []\n self.parents = []\n\n def add_child(self, child_username):\n new = self.forest.get_node(child_username)\n new.parents.append(self)\n self.children.append(new)\n return new\n\n def _repr(self, instances):\n if self not in instances:\n return \"{\" + self.username + \": [\" + \", \".join(child._repr(instances + [self])\n for child in self.children) + \"]}\"\n else:\n return f\"(recursive loop to {self.username})\"\n\n def __str__(self):\n return self._repr([])\n\n def __repr__(self):\n return \"User(username=\" + repr(self.username) + \", uid=\" + repr(self.uid) + \")\"\n\n\ndef _destringize(data):\n if data == \"None\":\n return None\n try:\n return ast.literal_eval(data)\n except SyntaxError as e:\n raise ValueError from e\n\n\ndef make_graph(data, users_data={}):\n if isinstance(data, tuple):\n data, name = data\n if name == \"raw_chain.forest\":\n data = pickle.loads(data)\n elif name == \"chain.gml\":\n data = networkx.read_gml(io.BytesIO(data), destringizer=_destringize)\n else:\n raise RuntimeError(f\"file name {name} incorrect\")\n if isinstance(data, networkx.DiGraph):\n return data\n graph = networkx.DiGraph()\n if isinstance(data, Forest):\n old_data = data\n data = data.get_dict()\n else:\n old_data = None\n username_to_name = {username.casefold(): username or uid for uid, username in data if username}\n for uid, username in data:\n graph.add_node(username or uid, username=username, uid=uid, **users_data.get((uid, username), {}))\n for (uid, username), children in data.items():\n name = username or uid\n for child in children:\n child_name = username_to_name.get(child.casefold(), None)\n if child_name is None:\n username_to_name[child] = child\n graph.add_node(child, username=child, uid=None)\n child_name = child\n graph.add_edge(name, child_name)\n if old_data:\n for node in old_data.get_nodes():\n name = node.username or node.uid\n graph.nodes[name][\"access_hash\"] = getattr(node.extras.get(\"entity\", None), 
\"access_hash\", None)\n graph.nodes[name][\"deleted\"] = None\n return graph\n\n\ndef _score_node(val):\n name, score = val\n if isinstance(name, int):\n return score - 1\n return score\n\n\ndef _score_chain(chain):\n ret = len(chain)\n if isinstance(chain[0], int) or isinstance(chain[-1], int):\n ret -= 1\n return ret\n\n\ndef _edge_bfs(graph, root, get_children):\n # root is returned last\n queue = collections.deque(((None, root),))\n visited_nodes = set()\n nexts = {(None, root): None}\n in_scores = {root: 0}\n out_scores = {root: 0}\n while queue:\n last_edge = queue.popleft()\n last_node, node = last_edge\n visited_nodes.add(node)\n children = tuple(get_children(graph, node))\n nexts[(node, None)] = last_edge\n for child in children:\n edge = (node, child)\n duplicate_node = False\n # quick and greedy heuristic to detect potential cycles\n if child in visited_nodes:\n # (slowly) verify cycle exists with latest iteration of the history (if this becomes outdated, a parent will always be added to queue and we will rerun this)\n next = last_edge\n duplicate_edge = False\n while next[0]:\n if next == edge:\n # the edge appears in our history, we already did this cycle\n duplicate_edge = True\n break\n if next[1] == child:\n duplicate_node = True\n next = nexts[next]\n if duplicate_edge:\n continue\n if out_scores[node] + 1 > in_scores.get(child, float(\"-inf\")):\n queue.append(edge)\n out_scores[child] = out_scores[node] + 1\n if not duplicate_node:\n # when a node is duplicated, we want to allow longer paths that go into it to replace the root, but we still want to count the loop towards the path score\n in_scores[child] = out_scores[node] + 1\n nexts[edge] = last_edge\n best = max(out_scores.items(), key=_score_node)\n next = best[0], None\n ret = []\n while next[0]:\n ret.append(next[0])\n next = nexts[next]\n return ret\n\n\ndef make_chain(graph, target):\n # select longest path, preferring chains ending in a username\n return _edge_bfs(graph, target, networkx.DiGraph.predecessors)\n\n\ndef make_notinchain(graph, target):\n chain = make_chain(graph, target)\n return set(graph.nodes) - set(chain)\n\n\ndef make_all_chains(data):\n \"\"\"\n Get a list of chains possible to generate from the data\n Prefers to make longer chains than shorter ones\n \"\"\"\n cut = data.copy()\n ret = []\n while len(cut):\n roots = [k for k, v in cut.pred.items() if not v]\n if not roots:\n # We can put the cycles off until the end, since they are unreachable they will not be deleted\n # To process the cycles, we pick a node, find the longest path backwards, and then forwards from that node.\n # We repeat this until all nodes are used.\n # Note that since there are no roots, every node in the component *must* be reachable.\n root = next(iter(cut))\n backwards = _edge_bfs(cut, root, networkx.DiGraph.predecessors)\n # No actually the longest in the whole graph, but it is the longest in the component, so it's irrelevant.\n longest = _edge_bfs(cut, backwards[0], networkx.DiGraph.neighbors)\n else:\n longest = max((_edge_bfs(cut, root, networkx.DiGraph.neighbors) for root in roots), key=_score_chain)\n longest.reverse()\n ret.append(longest)\n cut.remove_nodes_from(longest)\n return ret\n", "sub_path": "biobot/chain.py", "file_name": "chain.py", "file_ext": "py", "file_size_in_byte": 8758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "ast.literal_eval", "line_number": 106, "usage_type": "call"}, {"api_name": "pickle.loads", 
"line_number": 115, "usage_type": "call"}, {"api_name": "networkx.read_gml", "line_number": 117, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 117, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 120, "usage_type": "attribute"}, {"api_name": "networkx.DiGraph", "line_number": 122, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 164, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 211, "usage_type": "attribute"}, {"api_name": "networkx.DiGraph", "line_number": 234, "usage_type": "attribute"}, {"api_name": "networkx.DiGraph", "line_number": 236, "usage_type": "attribute"}, {"api_name": "networkx.DiGraph", "line_number": 238, "usage_type": "attribute"}]} +{"seq_id": "262726700", "text": "from django.contrib.auth.decorators import login_required\nfrom django.db.models import Count\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nfrom QandA.forms import *\nfrom account.models import User, Credential, Topic\nfrom QandA.models import Question, Answer, AnswerComment, AnswerRequest\n\n\n@login_required\ndef home(request):\n if request.user.is_authenticated:\n return render(request, 'home.html', {'unread_count': request.user.notifications.unread().count(),\n 'notifications': request.user.notifications.all()\n })\n else:\n return render(request, 'home.html')\n\n\ndef ask_question(request):\n if request.method == 'POST':\n uf = QForm(request.POST)\n if uf.is_valid():\n question = uf.save(commit=False)\n question.asker = request.user\n question.save()\n return HttpResponseRedirect(reverse('QandA:question', args={question.id}))\n else:\n print()\n\n\ndef edit(request, q_id):\n if request.method == 'POST':\n uf = QForm(request.POST, instance=Question.objects.get(id=q_id))\n if uf.is_valid():\n question = uf.save(commit=False)\n # if request.user not in question.editors.all():\n question.editors.add(request.user)\n question.save()\n return HttpResponseRedirect(reverse('QandA:question', args={q_id}))\n else:\n print()\n\n\ndef answer(request, q_id):\n if request.method == 'POST':\n answer_f = Answer()\n answer_f.responder = request.user\n answer_f.text = request.POST['ckeq-'+q_id]\n answer_f.question = Question.objects.get(id=q_id)\n answer_f.save()\n return render(request, 'Answer/answer.html', {'answer': answer_f, 'way': 'single'})\n\n\ndef question_page(request, q_id):\n question = Question.objects.get(id=q_id)\n return render(request, 'Question/question.html', {'question': question})\n\n\ndef vote(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n selected_answer.voters.add(request.user)\n return JsonResponse({'vote_count': selected_answer.voters.all().count()})\n\n\ndef devote(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n selected_answer.voters.remove(request.user)\n return JsonResponse({'vote_count': selected_answer.voters.all().count()})\n\n\ndef comment_vote(request, c_id):\n selected_comment = AnswerComment.objects.get(id=c_id)\n selected_comment.voters.add(request.user)\n return JsonResponse({'vote_count': selected_comment.voters.all().count()})\n\n\ndef comment_devote(request, c_id):\n selected_comment = AnswerComment.objects.get(id=c_id)\n selected_comment.voters.remove(request.user)\n return JsonResponse({'vote_count': selected_comment.voters.all().count()})\n\n\ndef bookmark(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n selected_answer.bookmarkers.add(request.user)\n 
return JsonResponse({})\n\n\ndef unbookmark(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n selected_answer.bookmarkers.remove(request.user)\n return JsonResponse({})\n\n\ndef share(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n selected_answer.shareholders.add(request.user)\n return JsonResponse({})\n\n\ndef unshare(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n selected_answer.shareholders.remove(request.user)\n return JsonResponse({})\n\n\ndef comment(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n text = request.GET['text']\n new_comment = AnswerComment(commenter=request.user, answer=selected_answer, text=text )\n new_comment.save()\n return render(request, 'Comment/comment.html', {'comment': new_comment})\n\n\ndef comment_delete(request, c_id):\n selected_comment = AnswerComment.objects.get(id=c_id)\n if request.user == selected_comment.commenter:\n selected_comment.delete()\n return JsonResponse({})\n\n\ndef answer_delete(request, a_id):\n selected_answer = Answer.objects.get(id=a_id)\n if request.user == selected_answer.responder:\n selected_answer.delete()\n return JsonResponse({})\n\n\ndef answer_edit(request, a_id):\n if request.method == 'POST':\n selected_answer = Answer.objects.get(id=a_id)\n if request.user == selected_answer.responder:\n selected_answer.text = request.POST['ckea-'+a_id]\n selected_answer.save()\n return render(request, 'Answer/answer.html', {'answer': selected_answer, 'way': 'single'})\n\n\ndef follow(request, q_id):\n selected_question = Question.objects.get(id=q_id)\n selected_question.followers.add(request.user)\n return JsonResponse({'follower_count': selected_question.followers.all().count()})\n\n\ndef unfollow(request, q_id):\n selected_question = Question.objects.get(id=q_id)\n selected_question.followers.remove(request.user)\n return JsonResponse({'follower_count': selected_question.followers.all().count()})\n\n\ndef question_topic_delete(request, q_id, t_id):\n selected_question = Question.objects.get(id=q_id)\n selected_topic = Topic.objects.get(id=t_id)\n selected_question.topics.remove(selected_topic)\n return JsonResponse({})\n\n\ndef question_topic_add(request, q_id, t_id):\n selected_question = Question.objects.get(id=q_id)\n selected_topic = Topic.objects.get(id=t_id)\n included = 0\n if selected_topic in selected_question.topics.all():\n included = 1\n else:\n selected_question.topics.add(selected_topic)\n return JsonResponse({'topic_id': selected_topic.id, 'topic_name': selected_topic.name, 'included': included})\n\n\ndef best_topic_based_users_for_question(request, t_id, q_id):\n question = Question.objects.get(id=q_id)\n topic = Topic.objects.get(id=t_id)\n user_subtract_set = AnswerRequest.objects.filter(asker=request.user, question=question).values_list('askee', flat=True)\n user_count = get_user_count(topic, user_subtract_set, request.user)\n return render(request, 'Request/user_answer_counts.html', {'user_count':user_count, 'topic':topic})\n\n\ndef best_topic_based_users(request, t_id):\n topic = Topic.objects.get(id=t_id)\n user_count = get_user_count(topic)\n return render(request, 'User/best_topic_based_users.html', {'user_count': user_count})\n\n\ndef get_user_count(topic, user_subtract_set=None, asker=None):\n answers = topic.questions.values_list('answer', flat=True)\n users = Answer.objects.filter(id__in=answers).values('responder').order_by()\n user_count = users.annotate(answer_count=Count('responder')).order_by('-answer_count')[:20]\n if 
user_subtract_set and asker:\n users = User.objects.filter(id__in=user_count.values_list('responder', flat=True)).exclude(id__in=user_subtract_set).exclude(id=asker.id)\n else:\n users = User.objects.filter(id__in=user_count.values_list('responder', flat=True))\n counts = user_count.values_list('answer_count', flat=True)\n return zip(users, counts)\n\n\ndef best_question_based_users(request, q_id):\n question = Question.objects.get(id=q_id)\n user_topic_count = {}\n user_subtract_set = AnswerRequest.objects.filter(asker=request.user, question=question).values_list('askee', flat=True)\n for topic in question.topics.all():\n user_count = get_user_count(topic, user_subtract_set, request.user)\n for user, count in user_count:\n if user not in user_topic_count:\n user_topic_count[user] = {}\n user_topic_count[user][topic] = count\n return render(request, 'Request/user_topic_answer_counts.html', {'user_topic_count': user_topic_count})\n\n\ndef answer_request(request, u_id, q_id):\n question = Question.objects.get(id=q_id)\n user = User.objects.get(id=u_id)\n ar = AnswerRequest(asker=request.user, askee=user, question=question)\n ar.save()\n return JsonResponse({})\n\n\ndef add_answer_credential(request, a_id, c_id):\n selected_answer = Answer.objects.get(id=a_id)\n selected_credential = Credential.objects.get(id=c_id)\n if request.user == selected_answer.responder:\n selected_answer.credential = selected_credential\n selected_answer.save()\n return JsonResponse({})\n\n\ndef question_topic(request, q_id):\n topics = Question.objects.get(id=q_id).topics\n return render(request, 'Topic/topic_list.html', {'topics': topics, 'way': 'empty'})\n", "sub_path": "QandA/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 12, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 29, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 36, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 36, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 42, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 42, "usage_type": "call"}, {"api_name": "QandA.models.Answer", "line_number": 49, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 58, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 58, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": 
"QandA.models.Answer.objects.get", "line_number": 63, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 63, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 65, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 69, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 69, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 71, "usage_type": "call"}, {"api_name": "QandA.models.AnswerComment.objects.get", "line_number": 75, "usage_type": "call"}, {"api_name": "QandA.models.AnswerComment.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "QandA.models.AnswerComment", "line_number": 75, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 77, "usage_type": "call"}, {"api_name": "QandA.models.AnswerComment.objects.get", "line_number": 81, "usage_type": "call"}, {"api_name": "QandA.models.AnswerComment.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "QandA.models.AnswerComment", "line_number": 81, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 83, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 87, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 87, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 93, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 93, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 95, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 99, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 99, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 101, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 105, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 105, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 107, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 111, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 111, "usage_type": "name"}, {"api_name": "QandA.models.AnswerComment", "line_number": 113, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}, {"api_name": "QandA.models.AnswerComment.objects.get", "line_number": 119, "usage_type": "call"}, {"api_name": "QandA.models.AnswerComment.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "QandA.models.AnswerComment", "line_number": 119, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 122, 
"usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 126, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 126, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 129, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 134, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 134, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 138, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 142, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 142, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 142, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 144, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 148, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 148, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 150, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 154, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 154, "usage_type": "name"}, {"api_name": "account.models.Topic.objects.get", "line_number": 155, "usage_type": "call"}, {"api_name": "account.models.Topic.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "account.models.Topic", "line_number": 155, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 157, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 161, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 161, "usage_type": "name"}, {"api_name": "account.models.Topic.objects.get", "line_number": 162, "usage_type": "call"}, {"api_name": "account.models.Topic.objects", "line_number": 162, "usage_type": "attribute"}, {"api_name": "account.models.Topic", "line_number": 162, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 168, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 172, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 172, "usage_type": "name"}, {"api_name": "account.models.Topic.objects.get", "line_number": 173, "usage_type": "call"}, {"api_name": "account.models.Topic.objects", "line_number": 173, "usage_type": "attribute"}, {"api_name": "account.models.Topic", "line_number": 173, "usage_type": "name"}, {"api_name": "QandA.models.AnswerRequest.objects.filter", "line_number": 174, "usage_type": "call"}, {"api_name": "QandA.models.AnswerRequest.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "QandA.models.AnswerRequest", "line_number": 174, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 176, "usage_type": "call"}, 
{"api_name": "account.models.Topic.objects.get", "line_number": 180, "usage_type": "call"}, {"api_name": "account.models.Topic.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "account.models.Topic", "line_number": 180, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 182, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.filter", "line_number": 187, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 187, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 188, "usage_type": "call"}, {"api_name": "account.models.User.objects.filter", "line_number": 190, "usage_type": "call"}, {"api_name": "account.models.User.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "account.models.User", "line_number": 190, "usage_type": "name"}, {"api_name": "account.models.User.objects.filter", "line_number": 192, "usage_type": "call"}, {"api_name": "account.models.User.objects", "line_number": 192, "usage_type": "attribute"}, {"api_name": "account.models.User", "line_number": 192, "usage_type": "name"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 198, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 198, "usage_type": "name"}, {"api_name": "QandA.models.AnswerRequest.objects.filter", "line_number": 200, "usage_type": "call"}, {"api_name": "QandA.models.AnswerRequest.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "QandA.models.AnswerRequest", "line_number": 200, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 207, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 211, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 211, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 211, "usage_type": "name"}, {"api_name": "account.models.User.objects.get", "line_number": 212, "usage_type": "call"}, {"api_name": "account.models.User.objects", "line_number": 212, "usage_type": "attribute"}, {"api_name": "account.models.User", "line_number": 212, "usage_type": "name"}, {"api_name": "QandA.models.AnswerRequest", "line_number": 213, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 215, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects.get", "line_number": 219, "usage_type": "call"}, {"api_name": "QandA.models.Answer.objects", "line_number": 219, "usage_type": "attribute"}, {"api_name": "QandA.models.Answer", "line_number": 219, "usage_type": "name"}, {"api_name": "account.models.Credential.objects.get", "line_number": 220, "usage_type": "call"}, {"api_name": "account.models.Credential.objects", "line_number": 220, "usage_type": "attribute"}, {"api_name": "account.models.Credential", "line_number": 220, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 224, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects.get", "line_number": 228, "usage_type": "call"}, {"api_name": "QandA.models.Question.objects", "line_number": 228, "usage_type": "attribute"}, {"api_name": "QandA.models.Question", "line_number": 228, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 229, "usage_type": "call"}]} +{"seq_id": 
"201975070", "text": "from datetime import datetime\nimport os\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.http import Http404\n\ndef file_list(request, date=None):\n template_name = 'index.html'\n # Реализуйте алгоритм подготавливающий контекстные данные для шаблона по примеру:\n context = {\n 'files': [],\n 'date': date\n }\n\n file_name_list = os.listdir(settings.FILES_PATH)\n\n for file_name in file_name_list:\n file_path = os.path.join(settings.FILES_PATH, file_name)\n file_full_info = os.stat(file_path)\n\n ctime = datetime.fromtimestamp(file_full_info.st_ctime)\n mtime = datetime.fromtimestamp(file_full_info.st_mtime)\n\n file_info = {\n 'name': file_name,\n 'ctime': ctime,\n 'mtime': mtime\n }\n\n if date:\n if file_info['ctime'].date() == datetime.strptime(date, '%Y-%m-%d').date():\n context['files'].append(file_info)\n else:\n context['files'].append(file_info)\n \n return render(request, template_name, context)\n\n\ndef file_content(request, name):\n file_path = os.path.join(settings.FILES_PATH, name)\n if os.path.exists(file_path):\n with open(file_path, 'r') as file:\n file_contents = file.read()\n\n return render(\n request,\n 'file_content.html',\n context={'file_name': name, 'file_content': file_contents}\n )\n else:\n raise Http404\n\n", "sub_path": "file_server/app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.listdir", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.settings.FILES_PATH", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings.FILES_PATH", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings.FILES_PATH", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "96474865", "text": "import time\nfrom watchdog.observers import Observer\nfrom watchdog.events import RegexMatchingEventHandler\nfrom watchdog.events import 
PatternMatchingEventHandler\nimport os\n\n\ndef on_created(event):\n print(f\"{event.src_path} has been created\")\n os.system(f\"git add {event.src_path}\")\n os.system(f\"git commit -m '{event.src_path} created'\")\n os.system(\"git push origin master\")\n\n\ndef on_deleted(event):\n print(f\"Delete {event.src_path}!\")\n os.system(f\"git commit -m '{event.src_path} deleted'\")\n os.system(\"git push origin master\")\n\n\ndef on_modified(event):\n print(f\"{event.src_path} has been modified\")\n os.system(f\"git add .\")\n os.system(f\"git commit -m '{event.src_path} modified'\")\n os.system(\"git push origin master\")\n\n\n# create the event handler\nif __name__ == \"__main__\":\n # patterns = [\".*\"]\n ignore_patterns = [\"^./.git\"]\n ignore_directories = False\n case_sensitive = True\n # my_event_handler = RegexMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)\n my_event_handler = RegexMatchingEventHandler(ignore_regexes=ignore_patterns)\n\n my_event_handler.on_created = on_created\n my_event_handler.on_deleted = on_deleted\n my_event_handler.on_modified = on_modified\n\n# create an observer\n path = \".\"\n go_recursively = True\n my_observer = Observer()\n my_observer.schedule(my_event_handler, path, recursive=go_recursively)\n\n my_observer.start()\n # try:\n # while True:\n # time.sleep(5)\n # except:\n # my_observer.stop()\n # print(\"Observer Stopped\")\n # # wait until finished\n my_observer.join()\n", "sub_path": "autoGitSimp.py", "file_name": "autoGitSimp.py", "file_ext": "py", "file_size_in_byte": 1640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.system", "line_number": 10, "usage_type": "call"}, {"api_name": "os.system", "line_number": 11, "usage_type": "call"}, {"api_name": "os.system", "line_number": 12, "usage_type": "call"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}, {"api_name": "os.system", "line_number": 18, "usage_type": "call"}, {"api_name": "os.system", "line_number": 23, "usage_type": "call"}, {"api_name": "os.system", "line_number": 24, "usage_type": "call"}, {"api_name": "os.system", "line_number": 25, "usage_type": "call"}, {"api_name": "watchdog.events.RegexMatchingEventHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "watchdog.observers.Observer", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "529710065", "text": "import keras\nimport numpy\nimport sklearn\nfrom keras.datasets import mnist\nfrom keras.models import Model, Sequential\nfrom keras.layers import Conv2D, Dense, Dropout, Flatten, BatchNormalization, Reshape, Conv2DTranspose, UpSampling2D, Activation, GlobalAveragePooling2D, Input, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom numpy.random import random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\nimport pathlib\nimport logging\nimport sys\nimport keras.backend as K\nfrom tensorflow.python import debug as tf_debug\n\n# sess = K.get_session()\n# sess = tf_debug.LocalCLIDebugWrapperSession(sess)\n# K.set_session(sess)\n\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\nlog = logging.getLogger()\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nMNIST = (numpy.concatenate((x_train, x_test)) / 128.) - 1. 
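# rescaled from [0, 255] to [-1, 1] to match the generator's tanh output; 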
# length is 70000\n\nDEPTH = 64\nKERNEL = 5\nSTRIDES = 2\nPADDING = 'same'\nACTIVATION = 'elu'\nDROPOUT = 0.4\nBATCH_NORM = 0.9\nPATIENCE = 2\nSAMPLE_SIZE = 10000\nSEED = 42\n\n\ndef d_conv_layer(inputs, multiplier, stride=STRIDES, trainable=True):\n    layer = Conv2D(\n        name='d_conv_%s' % multiplier,\n        strides=stride,\n        filters=DEPTH * multiplier,\n        kernel_size=KERNEL,\n        input_shape=inputs.shape,\n        padding=PADDING,\n        trainable=trainable)(inputs)\n    layer = LeakyReLU(alpha=0.3)(layer)\n    layer = Dropout(DROPOUT)(layer)\n    layer = BatchNormalization()(layer)\n    return layer\n\n\ndef discriminator_layers(inputs, trainable=True):\n    layers = d_conv_layer(inputs, 1, trainable=trainable)\n    layers = d_conv_layer(layers, 2, trainable=trainable)\n    layers = d_conv_layer(layers, 4, trainable=trainable)\n    layers = d_conv_layer(layers, 8, 1, trainable=trainable)\n    layers = Flatten()(layers)\n    layers = Dense(\n        1, name=\"d_output\", activation='sigmoid', trainable=trainable)(layers)\n    return layers\n\n\ndef g_conv_layer(inputs, multiplier, trainable=True):\n    x = Conv2DTranspose(\n        DEPTH * multiplier,\n        KERNEL,\n        name='g_conv_%s' % multiplier,\n        padding=PADDING,\n        trainable=trainable)(inputs)\n    x = BatchNormalization(momentum=BATCH_NORM)(x)\n    x = LeakyReLU(alpha=0.3)(x)\n    x = UpSampling2D()(x)\n    return x\n\n\ndef generator_layers(inputs, trainable=True):\n    size = 3\n    x = Dense(\n        DEPTH * 8 * size * size,\n        name=\"g_dense\",\n        input_shape=inputs.shape,\n        trainable=trainable)(inputs)\n    x = BatchNormalization(momentum=BATCH_NORM)(x)\n    x = Activation(ACTIVATION)(x)\n    x = Reshape((size, size, DEPTH * 8))(x)\n    x = Dropout(DROPOUT)(x)\n    x = g_conv_layer(x, 4, trainable=trainable)\n    x = g_conv_layer(x, 2, trainable=trainable)\n    x = ZeroPadding2D((1, 1))(x)\n    x = g_conv_layer(x, 1, trainable=trainable)\n    x = Conv2DTranspose(\n        1, KERNEL, name='g_output', padding=PADDING, trainable=trainable)(x)\n    x = Activation('tanh')(x)\n    return x\n\n\ndef train_generator():\n    gen_inputs = Input(shape=(100, ))\n    gen_layers = generator_layers(gen_inputs, trainable=True)\n    disc_layers = discriminator_layers(gen_layers, trainable=False)\n    adversarial = Model(inputs=gen_inputs, outputs=disc_layers)\n    adversarial.compile(\n        optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n    adversarial.summary()\n\n    if pathlib.Path(\"./adversarial.hdf5\").is_file():\n        log.info(\"LOADING ADVERSARIAL WEIGHTS\")\n        adversarial.load_weights(\"adversarial.hdf5\", by_name=True)\n    if pathlib.Path(\"./discriminator.hdf5\").is_file():\n        log.info(\"LOADING DISCRIMINATOR WEIGHTS\")\n        adversarial.load_weights(\"discriminator.hdf5\", by_name=True)\n\n    inputs = random((SAMPLE_SIZE, 100))\n    labels = numpy.ones(SAMPLE_SIZE)\n\n    data_splits = get_generator_splits(inputs, labels)\n\n    checkpointer = keras.callbacks.ModelCheckpoint(\n        \"adversarial.hdf5\",\n        monitor='val_loss',\n        verbose=1,\n        save_best_only=True,\n        save_weights_only=True,\n        mode='auto',\n        period=1)\n    stopper = keras.callbacks.EarlyStopping(\n        monitor='val_loss', patience=PATIENCE, min_delta=.02, verbose=1)\n\n    adversarial.fit(\n        data_splits[\"train\"][0],\n        data_splits[\"train\"][1],\n        epochs=10,\n        batch_size=32,\n        verbose=1,\n        callbacks=[checkpointer, stopper],\n        validation_split=0.2\n    )\n\n    scores = adversarial.evaluate(data_splits[\"test\"][0], data_splits[\"test\"][1])\n    log.info(\"METRICS: %s %s\" % (adversarial.metrics_names, scores))\n\n\ndef generator_model(trainable):\n    log.info(\"MAKING GENERATOR MODEL\")\n    gen_inputs = Input(shape=(100, ))\n    gen_layers = generator_layers(gen_inputs, trainable=trainable)\n    generator = Model(inputs=gen_inputs, 
outputs=gen_layers)\n if pathlib.Path(\"./adversarial.hdf5\").is_file():\n log.info(\"LOADING GENERATOR WEIGHTS\")\n generator.load_weights(\"adversarial.hdf5\", by_name=True)\n return generator\n\n\ndef generate_fakes(quantity):\n generator = generator_model(False)\n generator.compile(\n optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n noise = random((quantity, 100))\n log.info(\"GENERATING FAKES\")\n fakes = generator.predict(noise, verbose=1)\n return fakes\n\n\ndef get_reals(quantity):\n reals = MNIST[:quantity, :, :]\n reals = numpy.expand_dims(reals, axis=3)\n return reals \n\ndef get_discriminator_splits(inputs, labels):\n training_bound = int(len(inputs) * 0.7)\n\n shuffled, shuffled_labels = shuffle(inputs, labels)\n\n X_train = shuffled[:training_bound, :, :]\n X_test = shuffled[training_bound:, :, :]\n\n y_train = shuffled_labels[:training_bound]\n y_test = shuffled_labels[training_bound:]\n\n return {\n \"train\": [X_train, y_train],\n \"test\": [X_test, y_test]\n }\n\ndef get_generator_splits(inputs, labels):\n training_bound = int(len(inputs) * 0.7)\n\n shuffled, shuffled_labels = shuffle(inputs, labels)\n\n X_train = shuffled[:training_bound, :]\n X_test = shuffled[training_bound:, :]\n\n y_train = shuffled_labels[:training_bound]\n y_test = shuffled_labels[training_bound:]\n\n return {\n \"train\": [X_train, y_train],\n \"test\": [X_test, y_test],\n }\n\ndef get_disc_data_sets(sample_size):\n \"\"\"returns training, test, and validation sets\"\"\"\n log.info(\"MAKING DATA SETS\")\n\n fakes = generate_fakes(int(sample_size / 2))\n reals = get_reals(int(sample_size / 2))\n total = numpy.concatenate((reals, fakes))\n real_labels = numpy.ones([len(reals)])\n fake_labels = numpy.zeros([len(fakes)])\n total_labels = numpy.concatenate((real_labels, fake_labels))\n return get_discriminator_splits(total, total_labels)\n\n\n\ndef discriminator_model(trainable):\n log.info(\"MAKING DISCRIMINATOR MODEL\")\n disc_inputs = Input(shape=(28, 28, 1))\n disc_layers = discriminator_layers(disc_inputs, trainable=trainable)\n discriminator = Model(inputs=disc_inputs, outputs=disc_layers)\n if pathlib.Path(\"./discriminator.hdf5\").is_file():\n log.info(\"LOADING DISCRIMINATOR WEIGHTS\")\n discriminator.load_weights(\"discriminator.hdf5\", by_name=True)\n return discriminator\n\n\ndef train_discriminator():\n data_sets = get_disc_data_sets(SAMPLE_SIZE)\n discriminator = get_compiled_discriminator()\n checkpointer = keras.callbacks.ModelCheckpoint(\n \"discriminator.hdf5\",\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n save_weights_only=True,\n mode='auto',\n period=1)\n discriminator.fit(\n data_sets[\"train\"][0],\n data_sets[\"train\"][1],\n epochs=1,\n batch_size=32,\n verbose=1,\n callbacks=[checkpointer],\n validation_split=0.2\n )\n\n scores = discriminator.evaluate(data_sets[\"test\"][0], data_sets[\"test\"][1])\n log.info(\"METRICS: %s %s\" % (discriminator.metrics_names, scores))\n\ndef get_compiled_discriminator():\n discriminator = discriminator_model(True)\n discriminator.compile(\n optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n return discriminator\n\ndef show_classified_images():\n fakes = generate_fakes(5)\n reals = get_reals(5)\n total = numpy.concatenate((fakes, reals))\n labels = numpy.concatenate((numpy.zeros(len(fakes)), numpy.ones(len(reals))))\n total = numpy.squeeze(total)\n total = numpy.expand_dims(total, axis=3)\n\n total = shuffle(total)\n\n discriminator = get_compiled_discriminator()\n classes = 
discriminator.predict(total, verbose=1)\n\n total = numpy.squeeze(total)\n\n w = 10\n h = 10\n fig = plt.figure(figsize=(8, 8))\n rows = 2\n cols = 5\n for i in range(1, rows * cols + 1):\n ax = fig.add_subplot(rows, cols, i)\n ax.set_title(\"Class: %i\" % (classes[i-1] > 0.5))\n plt.imshow(total[i-1])\n plt.show()\n\n\n# for i in range(10):\n# train_discriminator()\n# train_generator()\n\n# train_generator()\n#train_discriminator()\nshow_classified_images()\n\n# print(get_disc_data_sets(10)[\"train\"][1])\n", "sub_path": "GAN/mnist.py", "file_name": "mnist.py", "file_ext": "py", "file_size_in_byte": 9026, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.datasets.mnist.load_data", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Conv2DTranspose", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.Conv2DTranspose", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 105, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 110, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 122, "usage_type": "attribute"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 130, "usage_type": "call"}, {"api_name": "keras.callbacks", 
"line_number": 130, "usage_type": "attribute"}, {"api_name": "keras.layers.Input", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 151, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 172, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 178, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 216, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 223, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 225, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 226, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 235, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 235, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 268, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}]} +{"seq_id": "222311841", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom simple.items import SimpleItem\n\n\nclass AndroidSpider(scrapy.Spider):\n name = 'android'\n #allowed_domains = ['http://www.androidquestions.org']\n def start_requests(self):\n url = 'http://www.androidquestions.org/search.php?do=process'\n key_list = ['recover message', 'recover photos', 'recover contacts', 'recover messages',\n 'recover call history', 'recover whatsapp']\n for key in key_list:\n yield scrapy.FormRequest(\n url=url,\n formdata={'query': key},\n callback=self.parse\n )\n\n def parse(self, response):\n node_list = response.xpath(\"//li[starts-with(@id,'thread_')]\")\n for node in node_list:\n item = SimpleItem()\n item['title'] = node.xpath(\".//a[starts-with(@id,'thread_title')]/text()\").extract()[0]\n item['sourceURL'] = \"http://www.androidquestions.org/\"+node.xpath(\".//a[starts-with(@id,'thread_title')]/@href\").extract()[0]\n time0 = node.xpath(\".//div[@class='author']/span\")\n time1 = time0[0].xpath('string(.)').extract()[0]\n time2 = time1[-20:]\n item['time'] = time2\n item['host'] = node.xpath(\"//div[@class='author']/span/a/text()\").extract()[0]\n yield 
item\n\n if len(response.xpath(\"//a[@rel='next']/@href\")):\n url = response.xpath(\"//a[@rel='next']/@href\").extract()[0]\n yield scrapy.Request(\"http://www.androidquestions.org/\" + url, callback=self.parse)\n", "sub_path": "simple/simple/spiders/android.py", "file_name": "android.py", "file_ext": "py", "file_size_in_byte": 1582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": "attribute"}, {"api_name": "scrapy.FormRequest", "line_number": 14, "usage_type": "call"}, {"api_name": "simple.items.SimpleItem", "line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "336736269", "text": "from django.test import TestCase, Client\nfrom rest_framework.test import APIClient\n\n\nfrom library_project.settings import REST_FRAMEWORK as drf_configs\nfrom books_app.models import Author\nimport json\n# Create your tests here.\n\nclass AuthorViewsTest(TestCase):\n\n client = APIClient()\n\n AUTHORS = [\n 'Mário de Andrade',\n 'Clarice Linspector',\n 'Carlos Drummond de Andrade',\n 'Guimarães Rosa',\n 'William Shakespeare',\n 'Jorge Amado',\n 'Graciliano Ramos',\n 'J.K. Rowling',\n 'José de Alencar',\n 'Cecília Meireles',\n 'Monteiro Lobato',\n 'Vinicius de Moraes',\n 'José Saramago'\n ]\n\n def setUp(self):\n \"\"\"\n Setting up test database\n \"\"\"\n authors = [Author(name=name) for name in self.AUTHORS]\n Author.objects.bulk_create(authors, ignore_conflicts=True)\n\n def test_list_authors_response_ok(self):\n \"\"\"\n Test listing all authors endpoint\n \"\"\"\n response = self.client.get(\"/authors/\")\n self.assertEquals(response.status_code, 200)\n \n def test_list_all_authors_json_content(self):\n \"\"\"\n Test GET (list) authors response content\n \"\"\"\n response = self.client.get(\"/authors/\")\n self.assertEquals(response.status_code, 200)\n content = json.loads(response.content)\n self.assertEquals(content['count'], Author.objects.all().count())\n self.assertEquals(sorted(content['results'][0].keys()), sorted(['id', 'name']))\n\n def test_list_all_authors_pagination(self):\n \"\"\"\n Test first page retrieving all results possible for one page size, full page of authors\n \"\"\"\n if len(self.AUTHORS) < drf_configs[\"PAGE_SIZE\"]:\n Author.objects.bulk_create((Author(name=f\"author{x}\") for x in range(drf_configs[\"PAGE_SIZE\"]-len(self.AUTHORS))))\n response = self.client.get(\"/authors/?page=1\")\n self.assertEquals(response.status_code, 200)\n content = json.loads(response.content)\n self.assertEquals(len(content['results']), drf_configs['PAGE_SIZE'])\n\n def test_list_all_authors_page_with_less_results(self):\n \"\"\"\n Test page with less than the page limit size\n \"\"\"\n if len(self.AUTHORS) < drf_configs[\"PAGE_SIZE\"]:\n Author.objects.bulk_create((Author(name=f\"author{x}\") for x in range(drf_configs[\"PAGE_SIZE\"]+1)))\n response = self.client.get(\"/authors/?page=2\")\n\n self.assertEquals(response.status_code, 200)\n content = json.loads(response.content)\n self.assertTrue(len(content['results']) < drf_configs['PAGE_SIZE'])\n \n def test_search_authors_by_exact_name(self):\n \"\"\"\n Test the name filtering in authors request with full name\n \"\"\"\n response = self.client.get(f\"/authors/?name={self.AUTHORS[2]}\")\n self.assertEquals(response.status_code, 200)\n # should get only one Author\n content = json.loads(response.content)\n self.assertEquals(len(content['results']), 
1)\n\n    def test_search_authors_containing_query(self):\n        \"\"\"\n        Test the name filtering in authors request; should retrieve more than one result\n        when the substring is present in more than one name\n        \"\"\"\n        response = self.client.get(f\"/authors/?name=José\")\n        self.assertEquals(response.status_code, 200)\n        content = json.loads(response.content)\n        # should get the two authors whose names contain 'José'\n        self.assertEquals(len(content['results']), 2)\n\n    def test_search_authors_retrieves_nothing(self):\n        \"\"\"\n        Test GET request returns empty list when filtering a name\n        \"\"\"\n        response = self.client.get(f\"/authors/?name=Gustavo\")\n        self.assertEquals(response.status_code, 200)\n        content = json.loads(response.content)\n        self.assertEquals(len(content['results']), 0)\n    \n    def test_get_author(self):\n        \"\"\"\n        Test specific author content\n        \"\"\"\n        response = self.client.get(f\"/authors/1/\")\n        self.assertEquals(response.status_code, 200)\n        # should get the first author in this test database list\n        content = json.loads(response.content)\n        self.assertEquals(content['name'], self.AUTHORS[0])\n\n    def test_get_author_fails(self):\n        \"\"\"\n        Test that no author is retrieved when an invalid id is passed\n        \"\"\"\n        response = self.client.get(f\"/authors/20/\")\n        # there is no author with id 20, should fail\n        self.assertEquals(response.status_code, 404)\n    ", "sub_path": "library_project/books_app/tests/tests_author.py", "file_name": "tests_author.py", "file_ext": "py", "file_size_in_byte": 4646, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", "line_number": 12, "usage_type": "call"}, {"api_name": "books_app.models.Author", "line_number": 34, "usage_type": "call"}, {"api_name": "books_app.models.Author.objects.bulk_create", "line_number": 35, "usage_type": "call"}, {"api_name": "books_app.models.Author.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "books_app.models.Author", "line_number": 35, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "books_app.models.Author.objects.all", "line_number": 51, "usage_type": "call"}, {"api_name": "books_app.models.Author.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "books_app.models.Author", "line_number": 51, "usage_type": "name"}, {"api_name": "library_project.settings.REST_FRAMEWORK", "line_number": 58, "usage_type": "name"}, {"api_name": "books_app.models.Author.objects.bulk_create", "line_number": 59, "usage_type": "call"}, {"api_name": "books_app.models.Author.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "books_app.models.Author", "line_number": 59, "usage_type": "name"}, {"api_name": "library_project.settings.REST_FRAMEWORK", "line_number": 59, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "library_project.settings.REST_FRAMEWORK", "line_number": 63, "usage_type": "name"}, {"api_name": "library_project.settings.REST_FRAMEWORK", "line_number": 69, "usage_type": "name"}, {"api_name": "books_app.models.Author.objects.bulk_create", "line_number": 70, "usage_type": "call"}, {"api_name": "books_app.models.Author.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "books_app.models.Author", "line_number": 70, "usage_type": "name"}, {"api_name": 
"library_project.settings.REST_FRAMEWORK", "line_number": 70, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "library_project.settings.REST_FRAMEWORK", "line_number": 75, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 94, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 104, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "493346970", "text": "from django.test import TestCase\n\nfrom django.contrib.auth.models import User\n\nfrom mock import patch\n\nfrom ..models import Customer\n\n\nclass TestCustomer(TestCase):\n \n def setUp(self):\n self.user = User.objects.create_user(username=\"patrick\")\n self.customer = Customer.objects.create(\n user=self.user,\n stripe_id=\"cus_xxxxxxxxxxxxxxx\",\n card_fingerprint=\"YYYYYYYY\",\n card_last_4=\"2342\",\n card_kind=\"Visa\"\n )\n \n @patch(\"stripe.Customer.retrieve\")\n def test_customer_purge_leaves_customer_record(self, CustomerRetrieveMock):\n self.customer.purge()\n customer = Customer.objects.get(stripe_id=self.customer.stripe_id)\n self.assertTrue(customer.user is None)\n self.assertTrue(customer.card_fingerprint == \"\")\n self.assertTrue(customer.card_last_4 == \"\")\n self.assertTrue(customer.card_kind == \"\")\n self.assertTrue(User.objects.filter(pk=self.user.pk).exists())\n \n @patch(\"stripe.Customer.retrieve\")\n def test_customer_delete_same_as_purge(self, CustomerRetrieveMock):\n self.customer.delete()\n customer = Customer.objects.get(stripe_id=self.customer.stripe_id)\n self.assertTrue(customer.user is None)\n self.assertTrue(customer.card_fingerprint == \"\")\n self.assertTrue(customer.card_last_4 == \"\")\n self.assertTrue(customer.card_kind == \"\")\n self.assertTrue(User.objects.filter(pk=self.user.pk).exists())\n \n def test_change_charge(self):\n self.assertTrue(self.customer.can_charge())\n \n @patch(\"stripe.Customer.retrieve\")\n def test_cannot_charge(self, CustomerRetrieveMock):\n self.customer.delete()\n self.assertFalse(self.customer.can_charge())\n", "sub_path": "payments/tests/test_customer.py", "file_name": "test_customer.py", "file_ext": "py", "file_size_in_byte": 1770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Customer.objects.create", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Customer.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Customer", "line_number": 14, "usage_type": "name"}, {"api_name": "models.Customer.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Customer.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Customer", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", 
"line_number": 30, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Customer.objects.get", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Customer.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Customer", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 40, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 32, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "359563871", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\nPATH = \"/home/yoanapaleva/Documents/Machine-Learning/internet-traffic-data-in-bits-fr.csv\"\ndf = pd.read_csv(PATH)\n\nfirst_col = df[\"Time\"].str.split(\" \", n=1, expand=True)\n\ndf[\"Date\"] = first_col[0]\ndf[\"Time\"] = first_col[1]\n\ndate = df[\"Date\"].str.split(\"/\", expand=True)\ndf[\"Month\"] = date[0]\ndf[\"Day\"] = date[1]\ndf[\"Year\"] = date[2]\ndf.drop(\"Date\", 1, inplace=True)\n\nX_train, X_test = train_test_split(df, test_size=0.2)\ny_train = X_train[\"Bits\"]\ny_test = X_test[\"Bits\"]\nprint(X_train.size)\nprint(X_test.size)\nprint(y_train.size)\nprint(y_test.size)\n\n# df.sort_values(\"Year\", inplace=True) (already sorted)\nprint(df.head())\n\nplt.plot(df['Time'], df['Bits'])\n# plt.show()\n\n\nbits = df.loc[:, 'Bits'].values\ns = bits.size # 19888\n\ntrain_data = bits[:18000]\ntest_data = bits[18000:]\n\nscaler = MinMaxScaler()\ntrain_data = train_data.reshape(-1, 1)\ntest_data = test_data.reshape(-1, 1)\n\nwindow_size = 2000\nfor i in range(0, 16000, window_size):\n scaler.fit(train_data[i:i + window_size, :])\n train_data[i:i + window_size, :] = scaler.transform(train_data[i:i + window_size, :])\n\nscaler.fit(train_data[i + window_size:, :])\ntrain_data[i + window_size:, :] = scaler.transform(train_data[i + window_size:, :])\n\ntrain_data = train_data.reshape(-1)\ntest_data = scaler.transform(test_data).reshape(-1)\n\nfeaturesTrain = torch.from_numpy(X_train)\nnew_list = list(map(int, X_train))\ne = torch.tensor(new_list)\ntargetsTrain = torch.from_numpy(y_train).type(torch.LongTensor)\n\nfeaturesTest = torch.from_numpy(X_test)\ntargetsTest = torch.from_numpy(y_test).type(torch.LongTensor)\n\n# batch_size, epoch and iteration\nbatch_size = 100\nn_iters = 10000\nnum_epochs = n_iters / (len(X_train) / batch_size)\nnum_epochs = int(num_epochs)\n\n# Pytorch train and test sets\ntrain = torch.utils.data.TensorDataset(featuresTrain,targetsTrain)\ntest = torch.utils.data.TensorDataset(featuresTest,targetsTest)\n\n# data loader\ntrain_loader = torch.utils.data.DataLoader(train, batch_size = batch_size, shuffle = False)\ntest_loader = torch.utils.data.DataLoader(test, batch_size = batch_size, shuffle = False)\n\nclass RNNModel(nn.Module):\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):\n super(RNNModel, self).__init__()\n # Number of hidden dimensions\n self.hidden_dim = hidden_dim\n\n # Number of hidden layers\n self.layer_dim = layer_dim\n\n # 
RNN\n self.rnn = nn.RNN(input_dim, hidden_dim, layer_dim, batch_first=True,\n nonlinearity='relu')\n\n # Readout layer\n self.fc = nn.Linear(hidden_dim, output_dim)\n\n def forward(self, x):\n # Initialize hidden state with zeros\n h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim))\n\n # One time step\n out, hn = self.rnn(x, h0)\n out = self.fc(out[:, -1, :])\n return out\n\n\n# batch_size, epoch and iteration\nbatch_size = 100\nn_iters = 2500\nnum_epochs = n_iters / (len(X_train) / batch_size)\nnum_epochs = int(num_epochs)\n\n# Pytorch train and test sets\ntrain = torch.utils.data.TensorDataset(featuresTrain, targetsTrain)\ntest = torch.utils.data.TensorDataset(featuresTest, targetsTest)\n\n# data loader\ntrain_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=False)\ntest_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)\n\n# Create RNN\ninput_dim = 28 # input dimension\nhidden_dim = 100 # hidden layer dimension\nlayer_dim = 2 # number of hidden layers\noutput_dim = 10 # output dimension\n\nmodel = RNNModel(input_dim, hidden_dim, layer_dim, output_dim)\n\n# Cross Entropy Loss\nerror = nn.CrossEntropyLoss()\n\n# SGD Optimizer\nlearning_rate = 0.05\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n", "sub_path": "RNN-Network-Traffic.py", "file_name": "RNN-Network-Traffic.py", "file_ext": "py", "file_size_in_byte": 4019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.RNN", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, 
"usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 117, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 120, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 136, "usage_type": "attribute"}]} +{"seq_id": "46512451", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0001_initial'),\n ('core', '0019_auto_20150602_1829'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('object_id', models.PositiveIntegerField()),\n ('name', models.CharField(max_length=256)),\n ('content_type', models.ForeignKey(to='contenttypes.ContentType')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterModelOptions(\n name='dogability',\n options={'ordering': ['-updated']},\n ),\n migrations.AlterField(\n model_name='dogability',\n name='trick',\n field=models.ForeignKey(to='core.Trick', related_name='dog_abilities'),\n preserve_default=True,\n ),\n ]\n", "sub_path": "dogs/core/migrations/0020_auto_20150906_1732.py", "file_name": "0020_auto_20150906_1732.py", "file_ext": "py", "file_size_in_byte": 1141, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": 
"django.db.migrations.AlterModelOptions", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "550646207", "text": "from flask import Flask, render_template, request, jsonify\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/posts\", methods=[\"POST\"])\ndef posts():\n\n # start = request.form.get(\"start\")\n # end = request.form.get(\"end\")\n start = 1\n end = 10\n\n posts = []\n for i in range(start, end + 1):\n posts.append(f\"Post #{i}\")\n return jsonify(posts)\n", "sub_path": "6.front-ends/posts1/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "184113097", "text": "from commons_web import init, _UNINSTALL_FLAG, EMAIL, PASSWORD, USERNAME, BIRTHDAY, FIRSTNAME, LASTNAME, PHONENUMBER, HEIGHT, WEIGHT, AGE, GENDER, LOCATION, ZIPCODE, SSN, DLN, CREDITCARD\nfrom commons_web import dump_text, find_clickable, find_editables, fill_edits, nicely_quit, signal_handler, click, shutdown\nimport time, re, sys, signal\nsignal.signal(signal.SIGINT, signal_handler)\nsignal.signal(signal.SIGPIPE, signal_handler)\nsignal.signal(signal.SIGTERM, signal_handler)\n\nimport atexit\natexit.register(shutdown)\n\ninit(sys.argv[1])\nfrom commons_web import session\n\nTIMELIMIT=30 # seconds\nSTATES=[\"BEGIN\", \"STATE LOGIN\", \"STATE FINDING ADF\", \"STATE DELETING ACCOUNT\", \"SIGNAL TERMINATED\"]\n\nRANDOM_STR=[\"Random\",EMAIL,PASSWORD,USERNAME,BIRTHDAY,FIRSTNAME,LASTNAME,PHONENUMBER,HEIGHT,WEIGHT,AGE,GENDER,LOCATION,ZIPCODE]\n\n\ndef dump_text_wrapper(state):\n global session\n print(\"| \" + state + \" | \")\n dump_text()\n\n\nfind_clickable_widget = find_clickable\n\n#--------------- 6 sets ----------------------#\nlogin_clickable_set=\"continue with email|log in|sign in|login or register|register email|already have an account|i have an account|get started|next|continue|agree|start|existing user|next|done|agree|continue\"\nlogin_editable_set = {\".*e-?mail.*\":EMAIL,\n \".*password.*\":PASSWORD,\n \".*user ?name.*\":USERNAME,\n \".*phone.*\":PHONENUMBER,\n \".*first ?name.*\":FIRSTNAME,\n \".*last ?name.*\":LASTNAME,\n \".*birthday.*|.*date of birth.*\":BIRTHDAY,\n }\nadf_clickable_set=\"more|profile|me|setting|privacy|edit account|account|drawer|navigation|skip|change email|manage data|menu|landing|delete data\"\nadf_editable_set=None\ndelete_clickable_set=\"delete account|delete my account|delete|disable account|close account|close my account|ok|yes|agree|confirm|submit|send email|verify|continue|next\"\ndelete_editable_set = {\".*e-?mail.*\":EMAIL,\n \".*password.*\":PASSWORD,\n \".*user ?name.*\":USERNAME,\n \".*code.*\":'1234'\n }\n\n#--------------- BEGIN --------------------------#\ndump_text_wrapper(STATES[0])\n\n#---------------- State Login 
-------------------#\ndef state_login():\n global login_editable_set\n global login_clickable_set\n timeout = time.time() + TIMELIMIT\n countle = 0\n while True:\n le=fill_edits(login_editable_set)\n lc=find_clickable_widget(login_clickable_set)\n if lc:\n click(lc)\n #lc.click()\n dump_text_wrapper(STATES[1])\n if le:\n countle += 1\n time.sleep(3)\n if countle >= 3 or time.time() > timeout:\n break\n import ipdb; ipdb.set_trace(context=1)\n\n#--------------- State Finding ADF -------------#\ndef state_finding_adf():\n global adf_clickable_set\n global session\n timeout = time.time() + TIMELIMIT\n while True:\n dc=find_clickable_widget(delete_clickable_set)\n if dc:\n dump_text_wrapper(STATES[2])\n break\n ac=find_clickable_widget(adf_clickable_set)\n if ac:\n click(ac)\n #ac.click()\n dump_text_wrapper(STATES[2])\n if time.time() > timeout:\n break\n time.sleep(3)\n import ipdb; ipdb.set_trace(context=1)\n\n\n#--------------- State Deleting Account --------#\ndef state_delete_account():\n global delete_editable_set\n global delete_clickable_set\n timeout = time.time() + TIMELIMIT\n countle = 0\n while True:\n le=fill_edits(delete_editable_set)\n lc=find_clickable_widget(delete_clickable_set)\n if lc:\n click(lc)\n #lc.click()\n dump_text_wrapper(STATES[3])\n #break\n if le:\n countle += 1\n time.sleep(3)\n if countle >= 3 or time.time() > timeout:\n break\n import ipdb; ipdb.set_trace(context=1)\n\n\nstate_login()\nstate_finding_adf()\nstate_delete_account()\n", "sub_path": "LeftoverAccountAnalyzer/lai_web/delete.py", "file_name": "delete.py", "file_ext": "py", "file_size_in_byte": 4035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "signal.signal", "line_number": 4, "usage_type": "call"}, {"api_name": "commons_web.signal_handler", "line_number": 4, "usage_type": "argument"}, {"api_name": "signal.SIGINT", "line_number": 4, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 5, "usage_type": "call"}, {"api_name": "commons_web.signal_handler", "line_number": 5, "usage_type": "argument"}, {"api_name": "signal.SIGPIPE", "line_number": 5, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 6, "usage_type": "call"}, {"api_name": "commons_web.signal_handler", "line_number": 6, "usage_type": "argument"}, {"api_name": "signal.SIGTERM", "line_number": 6, "usage_type": "attribute"}, {"api_name": "atexit.register", "line_number": 9, "usage_type": "call"}, {"api_name": "commons_web.shutdown", "line_number": 9, "usage_type": "argument"}, {"api_name": "commons_web.init", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "commons_web.EMAIL", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.PASSWORD", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.USERNAME", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.BIRTHDAY", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.FIRSTNAME", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.LASTNAME", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.PHONENUMBER", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.HEIGHT", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.WEIGHT", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.AGE", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.GENDER", 
"line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.LOCATION", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.ZIPCODE", "line_number": 17, "usage_type": "name"}, {"api_name": "commons_web.dump_text", "line_number": 23, "usage_type": "call"}, {"api_name": "commons_web.find_clickable", "line_number": 26, "usage_type": "name"}, {"api_name": "commons_web.EMAIL", "line_number": 30, "usage_type": "name"}, {"api_name": "commons_web.PASSWORD", "line_number": 31, "usage_type": "name"}, {"api_name": "commons_web.USERNAME", "line_number": 32, "usage_type": "name"}, {"api_name": "commons_web.PHONENUMBER", "line_number": 33, "usage_type": "name"}, {"api_name": "commons_web.FIRSTNAME", "line_number": 34, "usage_type": "name"}, {"api_name": "commons_web.LASTNAME", "line_number": 35, "usage_type": "name"}, {"api_name": "commons_web.BIRTHDAY", "line_number": 36, "usage_type": "name"}, {"api_name": "commons_web.EMAIL", "line_number": 41, "usage_type": "name"}, {"api_name": "commons_web.PASSWORD", "line_number": 42, "usage_type": "name"}, {"api_name": "commons_web.USERNAME", "line_number": 43, "usage_type": "name"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "commons_web.fill_edits", "line_number": 57, "usage_type": "call"}, {"api_name": "commons_web.click", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "time.time", "line_number": 66, "usage_type": "call"}, {"api_name": "ipdb.set_trace", "line_number": 68, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "commons_web.click", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "ipdb.set_trace", "line_number": 88, "usage_type": "call"}, {"api_name": "time.time", "line_number": 95, "usage_type": "call"}, {"api_name": "commons_web.fill_edits", "line_number": 98, "usage_type": "call"}, {"api_name": "commons_web.click", "line_number": 101, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 108, "usage_type": "call"}, {"api_name": "ipdb.set_trace", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "310718442", "text": "import sys\nif sys.argv[1]=='help':\n print('Arguments: history folder path')\n exit(0)\n\nimport pickle\nimport matplotlib.pyplot as plt\nimport os\n\nmodel_history_dir=sys.argv[1]\nmodel_history_files=[model_history_dir + x for x in os.listdir(model_history_dir) if x.endswith('.pickle') and not x.startswith('._')]\n\n\nfor path in model_history_files:\n\tfilename=os.path.basename(path)[:-7]\n\tprint(path)\n\tpickle_in=open(path,'rb')\n\tmodel_history=pickle.load(pickle_in)\n\n\tloss = model_history['loss']\n\tacc = model_history['acc']\n\tval_loss = model_history['val_loss']\n\tval_acc = model_history['val_acc']\n\n\tfor x in range(len(loss)):\n\t\tprint('loss: ', loss[x], 'acc: ', acc[x], 'val_loss: ', val_loss[x], 'val_acc: ', val_acc[x])\n\n\tname='Model: ' +filename +', Train and Val acc'\n\tepochs = range(1, len(acc) + 1)\n\tplt.figure()\n\tplt.plot(epochs, acc, 'bo', label='Training acc')\n\tplt.plot(epochs, val_acc, 'b', label='Validation acc')\n\tplt.title(name)\n\tplt.legend()\n\tplt.savefig('modelHistory/visuals/' +filename +'_Acc.png')\n\tplt.close(name)\n\n\tname='Model: ' +filename +', Train and Val 
loss'\n\tplt.figure()\n\tplt.plot(epochs, loss, 'bo', label='Training loss')\n\tplt.plot(epochs, val_loss, 'b', label='Validation loss')\n\tplt.title(name)\n\tplt.legend()\n\tplt.savefig('modelHistory/visuals/' +filename +'_Loss.png')\n\tplt.close(name)", "sub_path": "historyViewer4Directory.py", "file_name": "historyViewer4Directory.py", "file_ext": "py", "file_size_in_byte": 1319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.argv", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "281351328", "text": "from collections import Counter\r\n\r\nif __name__ == \"__main__\":\r\n X = int(input())\r\n shoe_sizes = list(map(int, input().split()))\r\n shoe_counter = Counter(shoe_sizes)\r\n N = int(input())\r\n earned = 0\r\n for _ in range(N):\r\n size, price = map(int, input().split())\r\n if shoe_counter.get(size) is None:\r\n continue\r\n elif shoe_counter.get(size) > 0:\r\n 
shoe_sizes.remove(size)\r\n            shoe_counter = Counter(shoe_sizes)\r\n            earned += price\r\n    print(earned)\r\n\r\n\r\n", "sub_path": "codechallanges/f.collections/1.collections-counter-pythonic.py", "file_name": "1.collections-counter-pythonic.py", "file_ext": "py", "file_size_in_byte": 531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.Counter", "line_number": 6, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "92373211", "text": "#encoding=utf8\n#\nimport MySQLdb as mysql\n\nall_lines = 100\ncon = mysql.connect(db='mingguangzhen', host='180.153.191.128', user='reboot', passwd='reboot123')\ncon.autocommit(True)\ncursor = con.cursor()\ndecide = cursor.execute('select * from log_data')\ndef insert_db():\n    count_dict = {}\n    cout = 0\n    with open(\"E:\\\\Reboot\\\\Python01\\\\sublime_Text\\\\04\\\\www_access_20140823.log\") as log_file:\n        for line in log_file.readlines():\n            temp = line.split()\n            _tup = (temp[0], temp[8],)\n            count_dict[_tup] = count_dict.get(_tup, 0) + 1\n    rev_list = sorted(count_dict.items(), key = lambda x:x[1], reverse=True)[:all_lines]\n    if decide < all_lines:\n        for tup in rev_list:\n            ins_sql = 'insert into log_data (ip,status,count) values(\"%s\",\"%s\",\"%s\")' % (tup[0][0], tup[0][1], tup[1])\n            cursor.execute(ins_sql)\n\ndef get_info(start,num):\n    sql = 'select * from log_data limit %s,%s' % (start,num)\n    cursor.execute(sql)\n    return cursor.fetchall()\n\n", "sub_path": "07/mingguangzhen/get_data.py", "file_name": "get_data.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "MySQLdb.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "615830331", "text": "# Counter converts a sequence of values into an object similar to defaultdict(int), mapping each key to its count.\nfrom collections import Counter\nc=Counter([0,1,2,0]) #{0:2,1:1,2:1} two 0s, one 1, one 2\nprint(c)\n\n# Also useful for counting the words in a document\nword_counts=Counter(document)  # assumes 'document' is a list of words defined elsewhere\n\n# Print the 10 most common words and their counts\nfor word, count in word_counts.most_common(10):\n    print(word,count)\n", "sub_path": "homework/p029_counter.py", "file_name": "p029_counter.py", "file_ext": "py", "file_size_in_byte": 476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.Counter", "line_number": 3, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "39094898", "text": "#import statements\nfrom collections import Counter\n\n\ndef fibonnaci(n):\n    '''Part I: Implement the Fibonnaci Sequence'''\n    if n <= 1:\n        return n\n    else:\n        return (fibonnaci(n - 1) + fibonnaci(n - 2))\n\n\ndef gcd(a, b):\n    '''Part II: Implement Euclid’s GCD Algorithm'''\n    if a == 0:\n        return b\n    return gcd(b % a, a)\n\n\ndef compareToHelper(s1, s2):\n    '''Part III: String Comparison\n    function that does recursion'''\n\n    # Housekeeping to handle mismatched string sizes\n    # Assigns the ASCII value, or 0\n    s1value = ord(s1[0]) if s1 else 0\n    s2value = ord(s2[0]) if s2 else 0\n\n    #dictionary to hold ascii values\n    values = {\"s1\": s1value, \"s2\": s2value}\n\n    # if at least 1 string has more than 1 character left\n    if len(s1) > 1 or len(s2) > 1:\n        # takes current value and adds next value\n        # Generalized base case is value = n + (n+1)\n        # we use recursion to get (n+1)\n        return Counter(values) + 
Counter(compareToHelper(s1[1:len(s1)], s2[1:len(s2)]))\n\n #if on last character does just returns, no need to keep going deeper.\n else:\n return values\n\n\ndef compareTo(s1,s2):\n '''Part III String Comparison\n function to calculate the difference between the 2 strings'''\n recurse = compareToHelper(s1, s2)\n return recurse[\"s1\"] - recurse[\"s2\"]\n\n\nif __name__ == '__main__':\n '''Main Entry Point'''\n print(f'Fibonnaci Sequence of 6: {fibonnaci(6)}')\n print(f'Fibonnaci Sequence of 7: {fibonnaci(7)}')\n print(f'Great Common Divisor (10, 15): {gcd(10, 15)}')\n print(f'Great Common Divisor (31, 2): {gcd(31, 2)}')\n print(f'String Comparison (qw2$$, aa): {compareTo(\"qw2$$\", \"aa\")}')\n print(f'String Comparison (b, aa): {compareTo(\"b\", \"aa\")}')\n", "sub_path": "recursion.py", "file_name": "recursion.py", "file_ext": "py", "file_size_in_byte": 1744, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.Counter", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "275158610", "text": "\"\"\"Defines the templaters.\"\"\"\nimport logging\nimport os.path\nimport pkgutil\nimport importlib\nimport sys\nfrom functools import reduce\nfrom typing import Callable, Dict, Generator, List, Optional, Tuple\n\nimport jinja2.nodes\nfrom jinja2 import (\n Environment,\n FileSystemLoader,\n TemplateError,\n TemplateSyntaxError,\n meta,\n)\nfrom jinja2.exceptions import TemplateNotFound, UndefinedError\nfrom jinja2.ext import Extension\nfrom jinja2.sandbox import SandboxedEnvironment\n\nfrom sqlfluff.core.config import FluffConfig\nfrom sqlfluff.core.errors import SQLBaseError, SQLTemplaterError\nfrom sqlfluff.core.templaters.base import (\n RawFileSlice,\n TemplatedFile,\n TemplatedFileSlice,\n large_file_check,\n)\nfrom sqlfluff.core.templaters.python import PythonTemplater\nfrom sqlfluff.core.templaters.slicers.tracer import JinjaAnalyzer\n\n# Instantiate the templater logger\ntemplater_logger = logging.getLogger(\"sqlfluff.templater\")\n\n\nclass JinjaTemplater(PythonTemplater):\n \"\"\"A templater using the jinja2 library.\n\n See: https://jinja.palletsprojects.com/\n \"\"\"\n\n name = \"jinja\"\n\n class Libraries:\n \"\"\"Mock namespace for user-defined Jinja library.\"\"\"\n\n pass\n\n @staticmethod\n def _extract_macros_from_template(template, env, ctx):\n \"\"\"Take a template string and extract any macros from it.\n\n Lovingly inspired by http://codyaray.com/2015/05/auto-load-jinja2-macros\n \"\"\"\n from jinja2.runtime import Macro # noqa\n\n # Iterate through keys exported from the loaded template string\n context = {}\n macro_template = env.from_string(template, globals=ctx)\n # This is kind of low level and hacky but it works\n try:\n for k in macro_template.module.__dict__:\n attr = getattr(macro_template.module, k)\n # Is it a macro? If so install it at the name of the macro\n if isinstance(attr, Macro):\n context[k] = attr\n except UndefinedError:\n # This occurs if any file in the macro path references an\n # undefined Jinja variable. It's safe to ignore this. Any\n # meaningful issues will surface later at linting time.\n pass\n # Return the context\n return context\n\n @classmethod\n def _extract_macros_from_path(\n cls, path: List[str], env: Environment, ctx: Dict\n ) -> dict:\n \"\"\"Take a path and extract macros from it.\"\"\"\n macro_ctx = {}\n for path_entry in path:\n # Does it exist? 
It should as this check was done on config load.\n if not os.path.exists(path_entry):\n raise ValueError(f\"Path does not exist: {path_entry}\")\n\n if os.path.isfile(path_entry):\n # It's a file. Extract macros from it.\n with open(path_entry) as opened_file:\n template = opened_file.read()\n # Update the context with macros from the file.\n try:\n macro_ctx.update(\n cls._extract_macros_from_template(template, env=env, ctx=ctx)\n )\n except TemplateSyntaxError as err:\n raise SQLTemplaterError(\n f\"Error in Jinja macro file {os.path.relpath(path_entry)}: \"\n f\"{err.message}\",\n line_no=err.lineno,\n line_pos=1,\n ) from err\n else:\n # It's a directory. Iterate through files in it and extract from them.\n for dirpath, _, files in os.walk(path_entry):\n for fname in files:\n if fname.endswith(\".sql\"):\n macro_ctx.update(\n cls._extract_macros_from_path(\n [os.path.join(dirpath, fname)], env=env, ctx=ctx\n )\n )\n return macro_ctx\n\n def _extract_macros_from_config(self, config, env, ctx):\n \"\"\"Take a config and load any macros from it.\"\"\"\n if config:\n # This is now a nested section\n loaded_context = (\n config.get_section((self.templater_selector, self.name, \"macros\")) or {}\n )\n else: # pragma: no cover TODO?\n loaded_context = {}\n\n # Iterate to load macros\n macro_ctx = {}\n for value in loaded_context.values():\n macro_ctx.update(\n self._extract_macros_from_template(value, env=env, ctx=ctx)\n )\n return macro_ctx\n\n def _extract_libraries_from_config(self, config):\n # If a more global library_path is set, let that take precedence.\n library_path = config.get(\"library_path\") or config.get_section(\n (self.templater_selector, self.name, \"library_path\")\n )\n if not library_path:\n return {}\n\n libraries = JinjaTemplater.Libraries()\n\n # If library_path has __init__.py we parse it as one module, else we parse it\n # a set of modules\n is_library_module = os.path.exists(os.path.join(library_path, \"__init__.py\"))\n library_module_name = os.path.basename(library_path)\n\n # Need to go one level up to parse as a module correctly\n walk_path = (\n os.path.join(library_path, \"..\") if is_library_module else library_path\n )\n\n for module_finder, module_name, _ in pkgutil.walk_packages([walk_path]):\n # skip other modules that can be near module_dir\n if is_library_module and not module_name.startswith(library_module_name):\n continue\n\n # import_module is deprecated as of python 3.4. 
This follows roughly\n # the guidance of the python docs:\n # https://docs.python.org/3/library/importlib.html#approximating-importlib-import-module\n spec = module_finder.find_spec(module_name)\n module = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = module\n spec.loader.exec_module(module)\n\n if \".\" in module_name: # nested modules have `.` in module_name\n *module_path, last_module_name = module_name.split(\".\")\n # find parent module recursively\n parent_module = reduce(\n lambda res, path_part: getattr(res, path_part),\n module_path,\n libraries,\n )\n\n # set attribute on module object to make jinja working correctly\n setattr(parent_module, last_module_name, module)\n else:\n # set attr on `libraries` obj to make it work in jinja nicely\n setattr(libraries, module_name, module)\n\n if is_library_module:\n # when library is module we have one more root module in hierarchy and we\n # remove it\n libraries = getattr(libraries, library_module_name)\n\n # remove magic methods from result\n return {k: v for k, v in libraries.__dict__.items() if not k.startswith(\"__\")}\n\n @staticmethod\n def _generate_dbt_builtins():\n \"\"\"Generate the dbt builtins which are injected in the context.\"\"\"\n # This feels a bit wrong defining these here, they should probably\n # be configurable somewhere sensible. But for now they're not.\n # TODO: Come up with a better solution.\n\n class ThisEmulator:\n \"\"\"A class which emulates the `this` class from dbt.\"\"\"\n\n name = \"this_model\"\n schema = \"this_schema\"\n database = \"this_database\"\n\n def __str__(self) -> str: # pragma: no cover TODO?\n return self.name\n\n dbt_builtins = {\n \"ref\": lambda model_ref: model_ref,\n \"source\": lambda source_name, table: f\"{source_name}_{table}\",\n \"config\": lambda **kwargs: \"\",\n \"var\": lambda variable, default=\"\": \"item\",\n # `is_incremental()` renders as True, always in this case.\n # TODO: This means we'll never parse other parts of the query,\n # that are only reachable when `is_incremental()` returns False.\n # We should try to find a solution to that. 
Perhaps forcing the file\n # to be parsed TWICE if it uses this variable.\n \"is_incremental\": lambda: True,\n \"this\": ThisEmulator(),\n }\n return dbt_builtins\n\n @classmethod\n def _crawl_tree(\n cls, tree, variable_names, raw\n ) -> Generator[SQLTemplaterError, None, None]:\n \"\"\"Crawl the tree looking for occurrences of the undeclared values.\"\"\"\n # First iterate through children\n for elem in tree.iter_child_nodes():\n yield from cls._crawl_tree(elem, variable_names, raw)\n # Then assess self\n if (\n isinstance(tree, jinja2.nodes.Name)\n and getattr(tree, \"name\") in variable_names\n ):\n line_no: int = getattr(tree, \"lineno\")\n tree_name: str = getattr(tree, \"name\")\n line = raw.split(\"\\n\")[line_no - 1]\n pos = line.index(tree_name) + 1\n yield SQLTemplaterError(\n f\"Undefined jinja template variable: {tree_name!r}\",\n line_no=line_no,\n line_pos=pos,\n )\n\n def _get_jinja_env(self, config=None):\n \"\"\"Get a properly configured jinja environment.\"\"\"\n # We explicitly want to preserve newlines.\n macros_path = self._get_macros_path(config)\n ignore_templating = config and \"templating\" in config.get(\"ignore\")\n if ignore_templating:\n\n class SafeFileSystemLoader(FileSystemLoader):\n def get_source(self, environment, name, *args, **kwargs):\n try:\n if not isinstance(name, DummyUndefined):\n return super().get_source(\n environment, name, *args, **kwargs\n )\n raise TemplateNotFound(str(name))\n except TemplateNotFound:\n # When ignore=templating is set, treat missing files\n # or attempts to load an \"Undefined\" file as the first\n # 'base' part of the name / filename rather than failing.\n templater_logger.debug(\n \"Providing dummy contents for Jinja macro file: %s\", name\n )\n value = os.path.splitext(os.path.basename(str(name)))[0]\n return value, f\"{value}.sql\", lambda: False\n\n loader = SafeFileSystemLoader(macros_path or [])\n else:\n loader = FileSystemLoader(macros_path) if macros_path else None\n extensions = [\"jinja2.ext.do\"]\n if self._apply_dbt_builtins(config):\n extensions.append(DBTTestExtension)\n\n return SandboxedEnvironment(\n keep_trailing_newline=True,\n # The do extension allows the \"do\" directive\n autoescape=False,\n extensions=extensions,\n loader=loader,\n )\n\n def _get_macros_path(self, config: FluffConfig) -> Optional[List[str]]:\n if config:\n macros_path = config.get_section(\n (self.templater_selector, self.name, \"load_macros_from_path\")\n )\n if macros_path:\n result = [s.strip() for s in macros_path.split(\",\") if s.strip()]\n if result:\n return result\n return None\n\n def _apply_dbt_builtins(self, config: FluffConfig) -> bool:\n if config:\n return config.get_section(\n (self.templater_selector, self.name, \"apply_dbt_builtins\")\n )\n return False\n\n def get_context(self, fname=None, config=None, **kw) -> Dict:\n \"\"\"Get the templating context from the config.\"\"\"\n # Load the context\n env = kw.pop(\"env\")\n live_context = super().get_context(fname=fname, config=config)\n # Apply dbt builtin functions if we're allowed.\n if config:\n # first make libraries available in the context\n # so they can be used by the macros too\n libraries = self._extract_libraries_from_config(config=config)\n live_context.update(libraries)\n\n if libraries.get(\"SQLFLUFF_JINJA_FILTERS\"):\n env.filters.update(libraries.get(\"SQLFLUFF_JINJA_FILTERS\"))\n\n if self._apply_dbt_builtins(config):\n # This feels a bit wrong defining these here, they should probably\n # be configurable somewhere sensible. 
But for now they're not.\n # TODO: Come up with a better solution.\n dbt_builtins = self._generate_dbt_builtins()\n for name in dbt_builtins:\n # Only apply if it hasn't already been set at this stage.\n if name not in live_context:\n live_context[name] = dbt_builtins[name]\n\n # Load macros from path (if applicable)\n if config:\n macros_path = self._get_macros_path(config)\n if macros_path:\n live_context.update(\n self._extract_macros_from_path(\n macros_path, env=env, ctx=live_context\n )\n )\n\n # Load config macros, these will take precedence over macros from the path\n live_context.update(\n self._extract_macros_from_config(\n config=config, env=env, ctx=live_context\n )\n )\n\n return live_context\n\n def construct_render_func(\n self, fname=None, config=None\n ) -> Tuple[Environment, dict, Callable[[str], str]]:\n \"\"\"Builds and returns objects needed to create and run templates.\"\"\"\n # Load the context\n env = self._get_jinja_env(config)\n live_context = self.get_context(fname=fname, config=config, env=env)\n\n def render_func(in_str: str) -> str:\n \"\"\"Used by JinjaTracer to instantiate templates.\n\n This function is a closure capturing internal state from process().\n Note that creating templates involves quite a bit of state known to\n _this_ function but not to JinjaTracer.\n\n https://www.programiz.com/python-programming/closure\n \"\"\"\n # Load the template, passing the global context.\n try:\n template = env.from_string(in_str, globals=live_context)\n except TemplateSyntaxError as err: # pragma: no cover\n # Something in the template didn't parse, return the original\n # and a violation around what happened.\n # NOTE: Most parsing exceptions will be captured when we call\n # env.parse() in the .process() method. Hence this exception\n # handling should never be called.\n raise SQLTemplaterError(\n f\"Failure to parse jinja template: {err}.\",\n line_no=err.lineno,\n )\n return template.render()\n\n return env, live_context, render_func\n\n @large_file_check\n def process(\n self, *, in_str: str, fname: str, config=None, formatter=None\n ) -> Tuple[Optional[TemplatedFile], list]:\n \"\"\"Process a string and return the new string.\n\n Note that the arguments are enforced as keywords\n because Templaters can have differences in their\n `process` method signature.\n A Templater that only supports reading from a file\n would need the following signature:\n process(*, fname, in_str=None, config=None)\n (arguments are swapped)\n\n Args:\n in_str (:obj:`str`): The input string.\n fname (:obj:`str`, optional): The filename of this string. This is\n mostly for loading config files at runtime.\n config (:obj:`FluffConfig`): A specific config to use for this\n templating operation. 
Only necessary for some templaters.\n formatter (:obj:`CallbackFormatter`): Optional object for output.\n\n \"\"\"\n if not config: # pragma: no cover\n raise ValueError(\n \"For the jinja templater, the `process()` method requires a config \"\n \"object.\"\n )\n\n try:\n env, live_context, render_func = self.construct_render_func(\n fname=fname, config=config\n )\n except SQLTemplaterError as err:\n return None, [err]\n\n violations: List[SQLBaseError] = []\n\n # Attempt to identify any undeclared variables or syntax errors.\n # The majority of variables will be found during the _crawl_tree\n # step rather than this first Exception which serves only to catch\n # catastrophic errors.\n try:\n syntax_tree = env.parse(in_str)\n potentially_undefined_variables = meta.find_undeclared_variables(\n syntax_tree\n )\n except Exception as err:\n unrendered_out = TemplatedFile(\n source_str=in_str,\n fname=fname,\n )\n templater_error = SQLTemplaterError(\n \"Failed to parse Jinja syntax. Correct the syntax or select an \"\n \"alternative templater.\"\n )\n # Capture a line number if we can.\n if isinstance(err, TemplateSyntaxError):\n templater_error.line_no = err.lineno\n return unrendered_out, [templater_error]\n\n undefined_variables = set()\n\n class UndefinedRecorder:\n \"\"\"Similar to jinja2.StrictUndefined, but remembers, not fails.\"\"\"\n\n # Tell Jinja this object is safe to call and does not alter data.\n # https://jinja.palletsprojects.com/en/2.9.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable\n unsafe_callable = False\n # https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable\n alters_data = False\n\n @classmethod\n def create(cls, name: str) -> \"UndefinedRecorder\":\n return UndefinedRecorder(name=name)\n\n def __init__(self, name: str) -> None:\n self.name = name\n\n def __str__(self) -> str:\n \"\"\"Treat undefined vars as empty, but remember for later.\"\"\"\n undefined_variables.add(self.name)\n return \"\"\n\n def __getattr__(self, item) -> \"UndefinedRecorder\":\n undefined_variables.add(self.name)\n return UndefinedRecorder(f\"{self.name}.{item}\")\n\n def __call__(self, *args, **kwargs) -> \"UndefinedRecorder\":\n return UndefinedRecorder(f\"{self.name}()\")\n\n Undefined = (\n UndefinedRecorder\n if \"templating\" not in config.get(\"ignore\")\n else DummyUndefined\n )\n for val in potentially_undefined_variables:\n if val not in live_context:\n live_context[val] = Undefined.create(val) # type: ignore\n\n try:\n # Slice the file once rendered.\n raw_sliced, sliced_file, out_str = self.slice_file(\n in_str,\n render_func=render_func,\n config=config,\n )\n if undefined_variables:\n # Lets go through and find out where they are:\n for template_err_val in self._crawl_tree(\n syntax_tree, undefined_variables, in_str\n ):\n violations.append(template_err_val)\n return (\n TemplatedFile(\n source_str=in_str,\n templated_str=out_str,\n fname=fname,\n sliced_file=sliced_file,\n raw_sliced=raw_sliced,\n ),\n violations,\n )\n except (TemplateError, TypeError) as err:\n templater_logger.info(\"Unrecoverable Jinja Error: %s\", err, exc_info=True)\n template_err: SQLBaseError = SQLTemplaterError(\n (\n \"Unrecoverable failure in Jinja templating: {}. Have you \"\n \"configured your variables? \"\n \"https://docs.sqlfluff.com/en/latest/configuration.html\"\n ).format(err),\n # We don't have actual line number information, but specify\n # line 1 so users can ignore with \"noqa\" if they want. 
(The\n # default is line 0, which can't be ignored because it's not\n # a valid line number.)\n line_no=1,\n line_pos=1,\n )\n violations.append(template_err)\n return None, violations\n\n def slice_file(\n self, raw_str: str, render_func: Callable[[str], str], config=None, **kwargs\n ) -> Tuple[List[RawFileSlice], List[TemplatedFileSlice], str]:\n \"\"\"Slice the file to determine regions where we can fix.\"\"\"\n # The JinjaTracer slicing algorithm is more robust, but it requires\n # us to create and render a second template (not raw_str).\n\n templater_logger.info(\"Slicing File Template\")\n templater_logger.debug(\" Raw String: %r\", raw_str[:80])\n analyzer = JinjaAnalyzer(raw_str, self._get_jinja_env())\n tracer = analyzer.analyze(render_func)\n trace = tracer.trace(append_to_templated=kwargs.pop(\"append_to_templated\", \"\"))\n return trace.raw_sliced, trace.sliced_file, trace.templated_str\n\n\nclass DummyUndefined(jinja2.Undefined):\n \"\"\"Acts as a dummy value to try and avoid template failures.\n\n Inherits from jinja2.Undefined so Jinja's default() filter will\n treat it as a missing value, even though it has a non-empty value\n in normal contexts.\n \"\"\"\n\n # Tell Jinja this object is safe to call and does not alter data.\n # https://jinja.palletsprojects.com/en/2.9.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable\n unsafe_callable = False\n # https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable\n alters_data = False\n\n def __init__(self, name) -> None:\n super().__init__()\n self.name = name\n\n def __str__(self) -> str:\n return self.name.replace(\".\", \"_\")\n\n @classmethod\n def create(cls, name) -> \"DummyUndefined\":\n \"\"\"Factory method.\n\n When ignoring=templating is configured, use 'name' as the value for\n undefined variables. We deliberately avoid recording and reporting\n undefined variables as errors. Using 'name' as the value won't always\n work, but using 'name', combined with implementing the magic methods\n (such as __eq__, see above), works well in most cases.\n \"\"\"\n templater_logger.debug(\n \"Providing dummy value for undefined Jinja variable: %s\", name\n )\n result = DummyUndefined(name)\n return result\n\n def __getattr__(self, item):\n return self.create(f\"{self.name}.{item}\")\n\n # Implement the most common magic methods. 
This helps avoid\n # templating errors for undefined variables.\n # https://www.tutorialsteacher.com/python/magic-methods-in-python\n def _self_impl(self, *args, **kwargs) -> \"DummyUndefined\":\n return self\n\n def _bool_impl(self, *args, **kwargs) -> bool:\n return True\n\n __add__ = _self_impl\n __sub__ = _self_impl\n __mul__ = _self_impl\n __floordiv__ = _self_impl\n __truediv__ = _self_impl\n __mod__ = _self_impl\n __pow__ = _self_impl\n __pos__ = _self_impl\n __neg__ = _self_impl\n __lshift__ = _self_impl\n __rshift__ = _self_impl\n __getitem__ = _self_impl\n __invert__ = _self_impl\n __call__ = _self_impl\n __and__ = _bool_impl\n __or__ = _bool_impl\n __xor__ = _bool_impl\n __bool__ = _bool_impl\n __lt__ = _bool_impl\n __le__ = _bool_impl\n __eq__ = _bool_impl\n __ne__ = _bool_impl\n __ge__ = _bool_impl\n __gt__ = _bool_impl\n\n def __hash__(self) -> int: # pragma: no cov\n # This is called by the \"in\" operator, among other things.\n return 0\n\n def __iter__(self):\n return [self].__iter__()\n\n\nclass DBTTestExtension(Extension):\n \"\"\"Jinja extension to handle the dbt test tag.\"\"\"\n\n tags = {\"test\"}\n\n def parse(self, parser) -> jinja2.nodes.Macro:\n \"\"\"Parses out the contents of the test tag.\"\"\"\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n test_name = parser.parse_assign_target(name_only=True).name\n\n parser.parse_signature(node)\n node.name = f\"test_{test_name}\"\n node.body = parser.parse_statements((\"name:endtest\",), drop_needle=True)\n return node\n", "sub_path": "src/sqlfluff/core/templaters/jinja.py", "file_name": "jinja.py", "file_ext": "py", "file_size_in_byte": 25117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlfluff.core.templaters.python.PythonTemplater", "line_number": 37, "usage_type": "name"}, {"api_name": "jinja2.runtime.Macro", "line_number": 66, "usage_type": "name"}, {"api_name": "jinja2.exceptions.UndefinedError", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 78, "usage_type": "name"}, {"api_name": "jinja2.Environment", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 84, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 87, "usage_type": "name"}, {"api_name": "jinja2.TemplateSyntaxError", "line_number": 96, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLTemplaterError", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.path.relpath", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 98, "usage_type": "name"}, {"api_name": "os.path.walk", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 110, "usage_type": "name"}, {"api_name": 
"{'Macro': 'jinja2.runtime.Macro'}.Libraries", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 145, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.path.basename", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 146, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 150, "usage_type": "name"}, {"api_name": "pkgutil.walk_packages", "line_number": 153, "usage_type": "call"}, {"api_name": "importlib.util.module_from_spec", "line_number": 162, "usage_type": "call"}, {"api_name": "importlib.util", "line_number": 162, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 163, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 169, "usage_type": "call"}, {"api_name": "jinja2.nodes.nodes", "line_number": 231, "usage_type": "attribute"}, {"api_name": "jinja2.nodes", "line_number": 231, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLTemplaterError", "line_number": 238, "usage_type": "call"}, {"api_name": "typing.Generator", "line_number": 224, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLTemplaterError", "line_number": 224, "usage_type": "name"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 251, "usage_type": "name"}, {"api_name": "jinja2.exceptions.TemplateNotFound", "line_number": 258, "usage_type": "call"}, {"api_name": "jinja2.exceptions.TemplateNotFound", "line_number": 259, "usage_type": "name"}, {"api_name": "os.path.path.splitext", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 266, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 266, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 271, "usage_type": "call"}, {"api_name": "jinja2.sandbox.SandboxedEnvironment", "line_number": 276, "usage_type": "call"}, {"api_name": "sqlfluff.core.config.FluffConfig", "line_number": 284, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 284, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 284, "usage_type": "name"}, {"api_name": "sqlfluff.core.config.FluffConfig", "line_number": 295, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 302, "usage_type": "name"}, {"api_name": "jinja2.TemplateSyntaxError", "line_number": 366, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLTemplaterError", "line_number": 372, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 348, "usage_type": "name"}, {"api_name": "jinja2.Environment", "line_number": 348, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 348, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLTemplaterError", "line_number": 413, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 416, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLBaseError", "line_number": 416, "usage_type": "name"}, {"api_name": "jinja2.meta.find_undeclared_variables", 
"line_number": 424, "usage_type": "call"}, {"api_name": "jinja2.meta", "line_number": 424, "usage_type": "name"}, {"api_name": "sqlfluff.core.templaters.base.TemplatedFile", "line_number": 428, "usage_type": "call"}, {"api_name": "sqlfluff.core.errors.SQLTemplaterError", "line_number": 432, "usage_type": "call"}, {"api_name": "jinja2.TemplateSyntaxError", "line_number": 437, "usage_type": "argument"}, {"api_name": "sqlfluff.core.templaters.base.TemplatedFile", "line_number": 494, "usage_type": "call"}, {"api_name": "jinja2.TemplateError", "line_number": 503, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLBaseError", "line_number": 505, "usage_type": "name"}, {"api_name": "sqlfluff.core.errors.SQLTemplaterError", "line_number": 505, "usage_type": "call"}, {"api_name": "sqlfluff.core.templaters.base.large_file_check", "line_number": 380, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 383, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 383, "usage_type": "name"}, {"api_name": "sqlfluff.core.templaters.base.TemplatedFile", "line_number": 383, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 522, "usage_type": "name"}, {"api_name": "sqlfluff.core.templaters.slicers.tracer.JinjaAnalyzer", "line_number": 530, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 523, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 523, "usage_type": "name"}, {"api_name": "sqlfluff.core.templaters.base.RawFileSlice", "line_number": 523, "usage_type": "name"}, {"api_name": "sqlfluff.core.templaters.base.TemplatedFileSlice", "line_number": 523, "usage_type": "name"}, {"api_name": "jinja2.nodes.Undefined", "line_number": 536, "usage_type": "attribute"}, {"api_name": "jinja2.nodes", "line_number": 536, "usage_type": "name"}, {"api_name": "jinja2.ext.Extension", "line_number": 618, "usage_type": "name"}, {"api_name": "jinja2.nodes.nodes.Macro", "line_number": 625, "usage_type": "call"}, {"api_name": "jinja2.nodes.nodes", "line_number": 625, "usage_type": "attribute"}, {"api_name": "jinja2.nodes", "line_number": 625, "usage_type": "name"}, {"api_name": "jinja2.nodes.nodes", "line_number": 623, "usage_type": "attribute"}, {"api_name": "jinja2.nodes", "line_number": 623, "usage_type": "name"}]} +{"seq_id": "52677200", "text": "from django.urls import path\n\nfrom . 
views import RegisterView,LoginView,LogOutView,test_json\n\nurlpatterns = [\n    path(r'register/',RegisterView.as_view(),name='register'),\n    path(r'login/',LoginView.as_view(),name='login'),\n    # Log out\n    path('logout/',LogOutView.as_view(),name='logout'),\n    path('json_test/',test_json)\n]", "sub_path": "python/9/21/1.智游教学管理系统 登陆注册首页学科列表/myPro/user/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "views.RegisterView.as_view", "line_number": 6, "usage_type": "call"}, {"api_name": "views.RegisterView", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.LoginView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.LoginView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.LogOutView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.LogOutView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.test_json", "line_number": 10, "usage_type": "argument"}]}
+{"seq_id": "99327336", "text": "import os\nimport time\nimport argparse\nimport mxnet as mx\nfrom dataset import load_dataset, color_normalize, cook_label\nfrom toy_ssd import FeatureExtractor, ToySSD, targets, FocalLoss, SmoothL1Loss\n\ndef train(batch_size, context, sgd=False):\n    print(\"Loading dataset...\", flush=True)\n    training_set, validating_set = load_dataset(batch_size)\n\n    features = FeatureExtractor(ctx=context)\n    model = ToySSD(1)\n    cls_loss = FocalLoss()\n    box_loss = SmoothL1Loss()\n\n    if os.path.isfile(\"model/toy_ssd.ckpt\"):\n        with open(\"model/toy_ssd.ckpt\", \"r\") as f:\n            ckpt_lines = f.readlines()\n        ckpt_argv = ckpt_lines[-1].split()\n        epoch = int(ckpt_argv[0])\n        best_L = float(ckpt_argv[1])\n        learning_rate = float(ckpt_argv[2])\n        epochs_no_progress = int(ckpt_argv[3])\n        model.load_parameters(\"model/toy_ssd.params\", ctx=context)\n    else:\n        epoch = 0\n        best_L = float(\"Inf\")\n        epochs_no_progress = 0\n        learning_rate = 0.0005\n        model.initialize(mx.init.Xavier(), ctx=context)\n\n    print(\"Learning rate:\", learning_rate)\n    if sgd:\n        print(\"Optimizer: SGD\")\n        trainer = mx.gluon.Trainer(model.collect_params(), \"SGD\",\n                                   {\"learning_rate\": learning_rate, \"momentum\": 0.5, \"clip_gradient\": 5.0})\n    else:\n        print(\"Optimizer: Adam\")\n        trainer = mx.gluon.Trainer(model.collect_params(), \"Adam\",\n                                   {\"learning_rate\": learning_rate, \"clip_gradient\": 5.0})\n    print(\"Training...\", flush=True)\n    while learning_rate >= 1e-8:\n        ts = time.time()\n\n        training_L = 0.0\n        training_batch = 0\n        training_set.reset()\n        for batch in training_set:\n            training_batch += 1\n            x = color_normalize(batch.data[0].as_in_context(context))\n            label = cook_label(batch.label[0].as_in_context(context))\n            source = features(x)\n            with mx.autograd.record():\n                anchors, cls_preds, box_preds = model(source)\n                cls_target, box_target, box_mask = targets(anchors, cls_preds, label)\n                L = cls_loss(cls_preds, cls_target) + box_loss(box_preds, box_target, box_mask)\n            L.backward()\n            trainer.step(batch_size)\n            batch_L = mx.nd.mean(L).asscalar()\n            if batch_L != batch_L:\n                raise ValueError()\n            training_L += batch_L\n            print(\"[Epoch %d Batch %d] batch_loss %.10f 
average_loss %.10f elapsed %.2fs\" %\n (epoch, training_batch, batch_L, training_L / training_batch, time.time() - ts), flush=True)\n\n validating_L = 0.0\n validating_batch = 0\n validating_set.reset()\n cls_metric = mx.metric.Accuracy()\n box_metric = mx.metric.MAE()\n for batch in validating_set:\n validating_batch += 1\n x = color_normalize(batch.data[0].as_in_context(context))\n label = cook_label(batch.label[0].as_in_context(context))\n source = features(x)\n anchors, cls_preds, box_preds = model(source)\n cls_target, box_target, box_mask = targets(anchors, cls_preds, label)\n L = cls_loss(cls_preds, cls_target) + box_loss(box_preds, box_target, box_mask)\n batch_L = mx.nd.mean(L).asscalar()\n if batch_L != batch_L:\n raise ValueError()\n validating_L += batch_L\n cls_metric.update([cls_target], [cls_preds.transpose(axes=(0, 2, 1))])\n box_metric.update([box_target], [box_preds * box_mask])\n\n epoch += 1\n\n avg_L = training_L / training_batch\n print(\"[Epoch %d] learning_rate %.10f training_loss %.10f validating_loss %.10f %s %f %s %f epochs_no_progress %d duration %.2fs\" % (\n epoch,\n learning_rate,\n training_L / training_batch,\n validating_L / validating_batch,\n *cls_metric.get(),\n *box_metric.get(),\n epochs_no_progress,\n time.time() - ts\n ), flush=True)\n\n if avg_L < best_L:\n best_L = avg_L\n epochs_no_progress = 0\n model.save_parameters(\"model/toy_ssd.params\")\n with open(\"model/toy_ssd.ckpt\", \"a\") as f:\n f.write(\"%d %.10f %.10f %d\\n\" % (epoch, best_L, learning_rate, epochs_no_progress))\n elif epochs_no_progress < 2:\n epochs_no_progress += 1\n else:\n epochs_no_progress = 0\n learning_rate *= 0.5\n trainer.set_learning_rate(learning_rate)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Start a toy_ssd trainer.\")\n parser.add_argument(\"--device_id\", help=\"select device that the model using (default: 0)\", type=int, default=0)\n parser.add_argument(\"--gpu\", help=\"using gpu acceleration\", action=\"store_true\")\n parser.add_argument(\"--sgd\", help=\"using sgd optimizer\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.gpu:\n context = mx.gpu(args.device_id)\n else:\n context = mx.cpu(args.device_id)\n\n while True:\n try:\n train(\n batch_size = 256,\n context = context,\n sgd = args.sgd\n )\n break;\n except ValueError:\n print(\"Oops! 
The value of loss become NaN...\")\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5387, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "dataset.load_dataset", "line_number": 10, "usage_type": "call"}, {"api_name": "toy_ssd.FeatureExtractor", "line_number": 12, "usage_type": "call"}, {"api_name": "toy_ssd.ToySSD", "line_number": 13, "usage_type": "call"}, {"api_name": "toy_ssd.FocalLoss", "line_number": 14, "usage_type": "call"}, {"api_name": "toy_ssd.SmoothL1Loss", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "mxnet.init.Xavier", "line_number": 31, "usage_type": "call"}, {"api_name": "mxnet.init", "line_number": 31, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.Trainer", "line_number": 36, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 36, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.Trainer", "line_number": 40, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "dataset.color_normalize", "line_number": 51, "usage_type": "call"}, {"api_name": "dataset.cook_label", "line_number": 52, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 54, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 54, "usage_type": "attribute"}, {"api_name": "toy_ssd.targets", "line_number": 56, "usage_type": "call"}, {"api_name": "mxnet.nd.mean", "line_number": 60, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 60, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}, {"api_name": "mxnet.metric.Accuracy", "line_number": 70, "usage_type": "call"}, {"api_name": "mxnet.metric", "line_number": 70, "usage_type": "attribute"}, {"api_name": "mxnet.metric.MAE", "line_number": 71, "usage_type": "call"}, {"api_name": "mxnet.metric", "line_number": 71, "usage_type": "attribute"}, {"api_name": "dataset.color_normalize", "line_number": 74, "usage_type": "call"}, {"api_name": "dataset.cook_label", "line_number": 75, "usage_type": "call"}, {"api_name": "toy_ssd.targets", "line_number": 78, "usage_type": "call"}, {"api_name": "mxnet.nd.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 80, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 116, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 123, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "531169937", "text": "#!/usr/bin/python\nimport sys\nimport _mysql\nimport cgi\nimport re\n#sys.stdout.write(\"Content-Type:text/plain\\n\\n\")\nfrom cloudNG import *\nsys.stdout.write(\"Content-Type:text/xml\\n\\n\")\n#sys.stdout.write(open(\"docs/index.html\").read())\nform=cgi.FieldStorage()\n\n\n\ndef xmlsafe(text):\n\ttext=re.compile(\"[\\n\\r]\").sub(\"
\",text)\n\t\n\tsafe=re.compile(\"<\").sub(\"{:leftbracket:}\", re.compile(\">\").sub(\"{:rightbracket:}\", re.compile(\"&\").sub(\"{:ampersand:}\", re.compile(\"/\").sub(\"{:forwardslash:}\", text ) ) ) )\n\n\treturn text\n\nsys.stdout.write(\"\\n\")\ncostResult,stockResult=brewerslabCloudApi().checkStockAndPrice(\"test@example.com\", form['recipeName'].value, form['process'].value,True)\nsys.stderr.write(\"stockResult['__out_of_stock__']\\n\")\nsys.stderr.write(\"%s\\n\" %(stockResult['__out_of_stock__']))\nsys.stderr.write(\"\\n\")\nsys.stdout.write(\"%s\\n\" %(len(stockResult['__out_of_stock__'])))\nfor s in range(len(stockResult['__out_of_stock__'])):\n\tsys.stdout.write(\"%s\\n\" %(s,xmlsafe(stockResult['__out_of_stock__'][s]),s))\nsys.stdout.write(\"\")\nsys.stdout.flush()\n\n", "sub_path": "brewerslab-orig-commander/metroui/ajaxStockPreCheck.py", "file_name": "ajaxStockPreCheck.py", "file_ext": "py", "file_size_in_byte": 1106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.stdout.write", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cgi.FieldStorage", "line_number": 10, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "334167435", "text": "from django.conf.urls.defaults import *\n#from django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.views.generic import DetailView, ListView\nfrom app1.models import Advisor,Tuto\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom django.conf import settings\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n #url(r'^$','app1.views.index'),\n \n url(r'^$','app1.views.index'),\n\n url(r'^apply/',\n ListView.as_view(\n model = Tuto,\n template_name='apply.html')),\n # Examples:\n # url(r'^$', 'tuto.views.home', name='home'),\n # url(r'^tuto/', include('tuto.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n 
urlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n url(r'^static/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT,\n }),\n )", "sub_path": "tuto/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.views.generic.ListView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 17, "usage_type": "name"}, {"api_name": "app1.models.Tuto", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "515980533", "text": "import torch\n\nfrom geoopt import util\nfrom .base import Manifold\n\n\n__all__ = [\"Stiefel\", \"EuclideanStiefel\", \"CanonicalStiefel\"]\n\n\n_stiefel_doc = r\"\"\"\n Manifold induced by the following matrix constraint:\n\n .. 
math::\n\n X^\\top X = I\\\\\n X \\in \\mathrm{R}^{n\\times m}\\\\\n n \\ge m\n\"\"\"\n\n\nclass Stiefel(Manifold):\n __doc__ = r\"\"\"\n {}\n\n Parameters\n ----------\n canonical : bool\n Use canonical inner product instead of euclidean one (defaults to canonical)\n \"\"\".format(\n _stiefel_doc\n )\n ndim = 2\n\n def __new__(cls, canonical=True):\n if cls is Stiefel:\n if canonical:\n return super().__new__(CanonicalStiefel)\n else:\n return super().__new__(EuclideanStiefel)\n else:\n return super().__new__(cls)\n\n def _check_shape(self, x, name):\n dim_is_ok = x.dim() >= 2\n if not dim_is_ok:\n return False, \"Not enough dimensions for `{}`\".format(name)\n shape_is_ok = x.shape[-1] <= x.shape[-2]\n if not shape_is_ok:\n return (\n False,\n \"`{}` should have shape[-1] <= shape[-2], got {} ...ij\", [U, torch.ones_like(d), V])\n\n\nclass CanonicalStiefel(Stiefel):\n __doc__ = r\"\"\"Stiefel Manifold with Canonical inner product\n\n {}\n \"\"\".format(\n _stiefel_doc\n )\n\n name = \"Stiefel(canonical)\"\n reversible = True\n\n def _inner(self, x, u, v):\n # _x = tr(u^T(I-1/2xx^T)v)\n # = tr(u^T(v-1/2xx^Tv))\n # = tr(u^Tv-1/2u^Txx^Tv)\n # = tr(u^Tv-1/2u^Txx^Tv)\n # = tr(u^Tv)-1/2tr(x^Tvu^Tx)\n # = \\sum_ij{(u*v}_ij}-1/2\\sum_ij{(x^Tv * x^Tu)_ij}\n xtu = x.transpose(-1, -2) @ u\n if v is None:\n xtv = xtu\n v = u\n else:\n xtv = x.transpose(-1, -2) @ v\n return (u * v).sum([-1, -2]) - 0.5 * (xtv * xtu).sum([-1, -2])\n\n # we do faster on inner without autofill\n _inner_autofill = False\n\n def _transp_one(self, x, u, t, v):\n a = self._amat(x, u)\n rhs = v + t / 2 * a @ v\n lhs = -t / 2 * a\n lhs[..., torch.arange(a.shape[-2]), torch.arange(x.shape[-2])] += 1\n qv, _ = torch.gesv(rhs, lhs)\n return qv\n\n def _transp_many(self, x, u, t, *vs):\n \"\"\"\n An optimized transp_many for Stiefel Manifold\n \"\"\"\n n = len(vs)\n vs = torch.cat(vs, -1)\n qvs = self._transp_one(x, u, t, vs).view(*x.shape[:-1], -1, x.shape[-1])\n return tuple(qvs[..., i, :] for i in range(n))\n\n def _retr_transp(self, x, u, t, v, *more):\n \"\"\"\n An optimized retr_transp for Stiefel Manifold\n \"\"\"\n n = 2 + len(more)\n xvs = torch.cat((x, v) + more, -1)\n qxvs = self._transp_one(x, u, t, xvs).view(*x.shape[:-1], -1, x.shape[-1])\n return tuple(qxvs[..., i, :] for i in range(n))\n\n def _proju(self, x, u):\n return u - x @ u.transpose(-1, -2) @ x\n\n def _retr(self, x, u, t):\n return self._transp_one(x, u, t, x)\n\n\nclass EuclideanStiefel(Stiefel):\n __doc__ = r\"\"\"Stiefel Manifold with Euclidean inner product\n\n {}\n \"\"\".format(\n _stiefel_doc\n )\n\n name = \"Stiefel(euclidean)\"\n reversible = False\n\n def _proju(self, x, u):\n return u - x @ util.linalg.sym(x.transpose(-1, -2) @ u)\n\n def _transp_one(self, x, u, t, v, y=None):\n if y is None:\n y = self._retr(x, u, t)\n return self._proju(y, v)\n\n def _transp_many(self, x, u, t, *vs, y=None):\n if y is None:\n y = self._retr(x, u, t)\n return tuple(self._proju(y, v) for v in vs)\n\n def _retr_transp(self, x, u, t, v, *more):\n y = self._retr(x, u, t)\n vs = self._transp_many(x, u, t, v, *more, y=y)\n return (y,) + vs\n\n def _inner(self, x, u, v):\n return (u * v).sum([-1, -2])\n\n def _retr(self, x, u, t):\n q, r = util.linalg.qr(x + u * t)\n unflip = torch.sign(torch.sign(util.linalg.extract_diag(r)) + 0.5)\n q *= unflip[..., None, :]\n return q\n", "sub_path": "geoopt/manifolds/stiefel.py", "file_name": "stiefel.py", "file_ext": "py", "file_size_in_byte": 5185, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "59", "api": [{"api_name": "base.Manifold", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.allclose", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.allclose", "line_number": 68, "usage_type": "call"}, {"api_name": "geoopt.util.linalg.svd", "line_number": 77, "usage_type": "call"}, {"api_name": "geoopt.util.linalg", "line_number": 77, "usage_type": "attribute"}, {"api_name": "geoopt.util", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.einsum", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.gesv", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 132, "usage_type": "call"}, {"api_name": "geoopt.util.linalg.sym", "line_number": 155, "usage_type": "call"}, {"api_name": "geoopt.util.linalg", "line_number": 155, "usage_type": "attribute"}, {"api_name": "geoopt.util", "line_number": 155, "usage_type": "name"}, {"api_name": "geoopt.util.linalg.qr", "line_number": 176, "usage_type": "call"}, {"api_name": "geoopt.util.linalg", "line_number": 176, "usage_type": "attribute"}, {"api_name": "geoopt.util", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.sign", "line_number": 177, "usage_type": "call"}, {"api_name": "geoopt.util.linalg.extract_diag", "line_number": 177, "usage_type": "call"}, {"api_name": "geoopt.util.linalg", "line_number": 177, "usage_type": "attribute"}, {"api_name": "geoopt.util", "line_number": 177, "usage_type": "name"}]} +{"seq_id": "515941407", "text": "\"\"\"\nTalk of Europe Creative Camp #2 :: Wordcloud project :: feature extractors\nThe callables in this module extract features (e.g. 
words and noun_phrases) from text.\n\nCopyright 2015, Konstantin Tretyakov, Ilya Kuzovkin, Alexander Tkachenko.\nLicense: MIT\n\"\"\"\nfrom textblob import TextBlob\nfrom unidecode import unidecode\nfrom textblob.np_extractors import ConllExtractor\nfrom collections import Counter\nimport requests\nimport json\n\n\ndef words(txt):\n    return Counter(TextBlob(unidecode(txt)).words.lower().lemmatize())\n\n\nclass NounPhraseExtractor(object):\n    \"\"\"\n    Usage:\n    >>>extr = NounPhraseExtractor()\n    >>>extr(\"Hello Mister President\")\n    >>>['mister president']\n    \"\"\"\n    def __init__(self):\n        self.extractor = ConllExtractor()\n    \n    def __call__(self, txt):\n        return list(TextBlob(unidecode(txt), np_extractor=self.extractor).noun_phrases)\n\nnoun_phrases = NounPhraseExtractor()\n\ndef dbpedia_entities(txt):\n    url = 'http://spotlight.dbpedia.org/rest/annotate'\n    payload = {'text': txt, 'confidence': 0.2, 'support': 20}\n    headers = {'Accept': 'application/json'}\n    r = requests.post(url, data=payload, headers=headers)\n    return json.loads(r.text)", "sub_path": "src/talkofeuropewords/talkofeuropewords/extract.py", "file_name": "extract.py", "file_ext": "py", "file_size_in_byte": 1205, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.Counter", "line_number": 17, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 17, "usage_type": "call"}, {"api_name": "unidecode.unidecode", "line_number": 17, "usage_type": "call"}, {"api_name": "textblob.np_extractors.ConllExtractor", "line_number": 28, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 31, "usage_type": "call"}, {"api_name": "unidecode.unidecode", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "210850457", "text": "\"\"\"\nA controller for MongoDB useful for running tests.\nProduction use is not recommended.\n\"\"\"\nfrom pathlib import Path\nimport os\nimport tempfile\nimport subprocess\nimport time\nimport shutil\nfrom pymongo.mongo_client import MongoClient\nimport semver\nimport test.util as test_util\n\n\nclass MongoController:\n    \"\"\"\n    The main MongoDB controller class.\n    Attributes:\n    port - the port for the MongoDB service.\n    temp_dir - the location of the MongoDB data and logs.\n    client - a pymongo client pointed at the server.\n    db_version - the version of the mongod executable.\n    index_version - the version of the indexes created by the mongod executable - 1 for < 3.4.0,\n        2 otherwise.\n    includes_system_indexes - true if system indexes will be included when listing database\n        indexes, false otherwise.\n    \"\"\"\n\n    def __init__(self, mongoexe: Path, root_temp_dir: Path, use_wired_tiger: bool=False) -> None:\n        '''\n        Create and start a new MongoDB database. An unused port will be selected for the server.\n        :param mongoexe: The path to the MongoDB server executable (e.g. mongod) to run.\n        :param root_temp_dir: A temporary directory in which to store MongoDB data and log files.\n            The files will be stored inside a child directory that is unique per invocation.\n        :param use_wired_tiger: For MongoDB versions > 3.0, specify that the Wired Tiger storage\n            engine should be used. 
Setting this to true for other versions will cause an error.\n '''\n if not mongoexe or not os.access(mongoexe, os.X_OK):\n raise test_util.TestException('mongod executable path {} does not exist or is not executable.'\n .format(mongoexe))\n if not root_temp_dir:\n raise ValueError('root_temp_dir is None')\n\n # make temp dirs\n root_temp_dir = root_temp_dir.absolute()\n os.makedirs(root_temp_dir, exist_ok=True)\n self.temp_dir = Path(tempfile.mkdtemp(prefix='MongoController-', dir=str(root_temp_dir)))\n data_dir = self.temp_dir.joinpath('data')\n os.makedirs(data_dir)\n\n self.port = test_util.find_free_port()\n\n command = [str(mongoexe), '--port', str(self.port), '--dbpath', str(data_dir),\n '--nojournal']\n if use_wired_tiger:\n command.extend(['--storageEngine', 'wiredTiger'])\n\n self._outfile = open(self.temp_dir.joinpath('mongo.log'), 'w')\n\n self._proc = subprocess.Popen(command, stdout=self._outfile, stderr=subprocess.STDOUT)\n time.sleep(1) # wait for server to start up\n self.client = MongoClient('localhost', self.port)\n # check that the server is up. See\n # https://api.mongodb.com/python/3.7.0/api/pymongo/mongo_client.html\n # #pymongo.mongo_client.MongoClient\n self.client.admin.command('ismaster')\n\n # get some info about the db\n self.db_version = self.client.server_info()['version']\n self.index_version = 2 if (semver.compare(self.db_version, '3.4.0') >= 0) else 1\n self.includes_system_indexes = (semver.compare(self.db_version, '3.2.0') < 0\n and not use_wired_tiger)\n\n def destroy(self, delete_temp_files: bool) -> None:\n \"\"\"\n Shut down the MongoDB server.\n :param delete_temp_files: delete all the MongoDB data files and logs generated during the\n test.\n \"\"\"\n if self.client:\n self.client.close()\n if self._proc:\n self._proc.terminate()\n if self._outfile:\n self._outfile.close()\n if delete_temp_files and self.temp_dir:\n shutil.rmtree(self.temp_dir)\n\n def clear_database(self, db_name, drop_indexes=False):\n '''\n Remove all data from a database.\n :param db_name: the name of the db to clear.\n :param drop_indexes: drop all indexes if true, retain indexes (which will be empty) if\n false.\n '''\n if drop_indexes:\n self.client.drop_database(db_name)\n else:\n db = self.client[db_name]\n for name in db.list_collection_names():\n if not name.startswith('system.'):\n # don't drop collection since that drops indexes\n db.get_collection(name).delete_many({})\n\n\ndef main():\n import conftest\n conftest.pytest_sessionstart(None)\n mongoexe = test_util.get_mongo_exe()\n root_temp_dir = test_util.get_temp_dir()\n\n mc = MongoController(mongoexe, root_temp_dir, False)\n print('port: ' + str(mc.port))\n print('temp_dir: ' + str(mc.temp_dir))\n print('db_version: ' + mc.db_version)\n print('index_version: ' + str(mc.index_version))\n print('includes_system_indexes: ' + str(mc.includes_system_indexes))\n mc.client['foo']['bar'].insert_one({'foo': 'bar'})\n mc.clear_database('foo')\n input('press enter to shut down')\n mc.destroy(True)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "test/mongo_controller.py", "file_name": "mongo_controller.py", "file_ext": "py", "file_size_in_byte": 5012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pathlib.Path", "line_number": 30, "usage_type": "name"}, {"api_name": "os.access", "line_number": 39, "usage_type": "call"}, {"api_name": "os.X_OK", "line_number": 39, "usage_type": "attribute"}, {"api_name": "test.util.TestException", 
"line_number": 40, "usage_type": "call"}, {"api_name": "test.util", "line_number": 40, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 47, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 48, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 50, "usage_type": "call"}, {"api_name": "test.util.find_free_port", "line_number": 52, "usage_type": "call"}, {"api_name": "test.util", "line_number": 52, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 61, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 61, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "pymongo.mongo_client.MongoClient", "line_number": 63, "usage_type": "call"}, {"api_name": "semver.compare", "line_number": 71, "usage_type": "call"}, {"api_name": "semver.compare", "line_number": 72, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 88, "usage_type": "call"}, {"api_name": "conftest.pytest_sessionstart", "line_number": 109, "usage_type": "call"}, {"api_name": "test.util.get_mongo_exe", "line_number": 110, "usage_type": "call"}, {"api_name": "test.util", "line_number": 110, "usage_type": "name"}, {"api_name": "test.util.get_temp_dir", "line_number": 111, "usage_type": "call"}, {"api_name": "test.util", "line_number": 111, "usage_type": "name"}]} +{"seq_id": "372147469", "text": "from progress_bar import ProgressBarDialog\nfrom gi.repository import Gtk\nimport gi\ngi.require_version('Gtk', '3.0')\n\n\nclass DialogAudio(Gtk.Dialog):\n\n def __init__(self, parent):\n self.data = {}\n Gtk.Dialog.__init__(self, \"Stegosaurus Audio\", parent, 0)\n self.set_border_width(10)\n\n box = self.get_content_area()\n notebook = Gtk.Notebook()\n box.add(notebook)\n\n page1 = self.get_hide_msg_window()\n page1.set_border_width(10)\n notebook.append_page(page1, Gtk.Label('Hide'))\n\n page2 = self.get_extract_msg_window()\n page2.set_border_width(10)\n notebook.append_page(page2, Gtk.Label('Extract'))\n\n self.show_all()\n\n def on_file_selected(self, widget, name):\n self.data[name] = widget.get_filename()\n\n def get_hide_msg_window(self):\n self.data = {\n \"filename\": \"\",\n \"key\": \"\",\n \"text_path\": \"\",\n \"frame_mode\": \"acak\",\n \"pixel_mode\": \"acak\"\n }\n grid = Gtk.Grid(column_homogeneous=True)\n grid.set_column_spacing(10)\n grid.set_row_spacing(10)\n\n button_open = Gtk.FileChooserButton(\"Open File\")\n button_open.set_width_chars(15)\n button_open.connect(\"selection-changed\",\n self.on_file_selected, \"filename\")\n key_entry = Gtk.Entry()\n button_open_text = Gtk.FileChooserButton(\"Open File\")\n button_open_text.set_width_chars(15)\n button_open_text.connect(\n \"selection-changed\", self.on_file_selected, \"text_path\")\n\n hbox1 = Gtk.Box()\n btn_frame_acak = Gtk.RadioButton.new_with_label_from_widget(\n None, \"Frame Acak\")\n btn_frame_acak.connect(\n \"toggled\", self.on_button_toggled, (\"frame_mode\", \"acak\"))\n\n btn_frame_seq = Gtk.RadioButton.new_from_widget(btn_frame_acak)\n btn_frame_seq.set_label(\"Frame Sequential\")\n btn_frame_seq.connect(\n \"toggled\", self.on_button_toggled, (\"frame_mode\", \"seq\"))\n hbox1.pack_start(btn_frame_acak, True, True, 10)\n hbox1.pack_start(btn_frame_seq, True, True, 10)\n\n hbox2 = Gtk.Box()\n btn_pixel_acak = Gtk.RadioButton.new_with_label_from_widget(\n None, \"Pixel Acak\")\n btn_pixel_acak.connect(\n \"toggled\", 
self.on_button_toggled, (\"pixel_mode\", \"acak\"))\n\n btn_pixel_seq = Gtk.RadioButton.new_from_widget(btn_pixel_acak)\n btn_pixel_seq.set_label(\"Pixel Sequential\")\n btn_pixel_seq.connect(\n \"toggled\", self.on_button_toggled, (\"pixel_mode\", \"seq\"))\n hbox2.pack_start(btn_pixel_acak, True, True, 10)\n hbox2.pack_start(btn_pixel_seq, True, True, 10)\n\n btn_encrypt_and_hide = Gtk.Button(\"Encrypt & Hide\")\n btn_encrypt_and_hide.connect(\"clicked\", self.on_button_submit, {\n \"key_entry\": key_entry\n })\n\n grid.attach(Gtk.Label(\"Choose Audio\"), 0, 0, 1, 1)\n grid.attach(button_open, 1, 0, 3, 1)\n grid.attach(Gtk.Label(\"Key\"), 0, 1, 1, 1)\n grid.attach(key_entry, 1, 1, 3, 1)\n grid.attach(Gtk.Label(\"Text\"), 0, 2, 1, 1)\n grid.attach(button_open_text, 1, 2, 3, 1)\n grid.attach(hbox1, 0, 3, 4, 1)\n grid.attach(hbox2, 0, 4, 4, 1)\n grid.attach(btn_encrypt_and_hide, 0, 5, 4, 1)\n\n return grid\n\n def on_button_toggled(self, button, data):\n if button.get_active():\n self.page1[data[0]] = data[1]\n\n def on_button_submit(self, button, additional_data):\n self.data[\"key\"] = additional_data[\"key_entry\"].get_text()\n if additional_data.get(\"save_path\"):\n self.data[\"save_path\"] = additional_data[\"save_path\"].get_text()\n print(self.data)\n dialog = ProgressBarDialog(self)\n response = dialog.run()\n\n dialog.destroy()\n\n def get_extract_msg_window(self):\n self.data = {\n \"filename\": \"\",\n \"key\": \"\",\n \"save_path\": \"\"\n }\n grid = Gtk.Grid(column_homogeneous=True)\n grid.set_column_spacing(10)\n grid.set_row_spacing(10)\n button_open = Gtk.FileChooserButton(\"Open File\")\n button_open.set_width_chars(15)\n button_open.connect(\"selection-changed\",\n self.on_file_selected, \"filename\")\n key_entry = Gtk.Entry()\n save_path = Gtk.Entry()\n\n btn_extract = Gtk.Button(\"Extract\")\n btn_extract.connect('clicked', self.on_button_submit, {\n \"key_entry\": key_entry,\n \"save_path\": save_path\n })\n\n grid.attach(Gtk.Label(\"Choose Video\"), 0, 0, 1, 1)\n grid.attach(button_open, 1, 0, 3, 1)\n grid.attach(Gtk.Label(\"Key\"), 0, 1, 1, 1)\n grid.attach(key_entry, 1, 1, 3, 1)\n grid.attach(Gtk.Label(\"Save File Path\"), 0, 2, 1, 1)\n grid.attach(save_path, 1, 2, 3, 1)\n grid.attach(btn_extract, 0, 3, 4, 1)\n\n return grid\n", "sub_path": "gui/dialog_audio.py", "file_name": "dialog_audio.py", "file_ext": "py", "file_size_in_byte": 4948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "gi.require_version", "line_number": 4, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Dialog", "line_number": 7, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 7, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Dialog.__init__", "line_number": 11, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Dialog", "line_number": 11, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 11, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Notebook", "line_number": 15, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 15, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 20, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 20, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 24, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 24, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Grid", "line_number": 39, 
"usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 39, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.FileChooserButton", "line_number": 43, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 43, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 47, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 47, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.FileChooserButton", "line_number": 48, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 48, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 53, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 53, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.RadioButton.new_with_label_from_widget", "line_number": 54, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.RadioButton", "line_number": 54, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 54, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.RadioButton.new_from_widget", "line_number": 59, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.RadioButton", "line_number": 59, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 59, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 66, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 66, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.RadioButton.new_with_label_from_widget", "line_number": 67, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.RadioButton", "line_number": 67, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 67, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.RadioButton.new_from_widget", "line_number": 72, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.RadioButton", "line_number": 72, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 72, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 79, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 79, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 84, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 84, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 86, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 86, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 88, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 88, "usage_type": "name"}, {"api_name": "progress_bar.ProgressBarDialog", "line_number": 105, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Grid", "line_number": 116, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 116, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.FileChooserButton", "line_number": 119, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 119, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 123, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 123, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 124, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 124, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 126, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 126, "usage_type": 
"name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 132, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 132, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 134, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 134, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 136, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 136, "usage_type": "name"}]} +{"seq_id": "80207107", "text": "import os\nfrom PyQt5 import uic\nmypath = os.path.dirname(__file__)\n_stopEnsembleDialogUI, _stopEnsembleDialog = \\\n uic.loadUiType(os.path.join(mypath, \"stopEnsembleDialog_UI.ui\"))\n \n\nclass stopEnsembleDialog(_stopEnsembleDialog, _stopEnsembleDialogUI):\n def __init__(self, dat, parent=None):\n '''\n Constructor for model setup dialog\n '''\n super(stopEnsembleDialog, self).__init__(parent=parent)\n self.setupUi(self) # Create the widgets\n self.buttonCode = 0\n self.terminateButton.clicked.connect(self.terminateEnsemble)\n self.disconnectButton.clicked.connect(self.disconnect)\n self.continueButton.clicked.connect(self.doNothing)\n\n def terminateEnsemble(self):\n self.buttonCode = 2\n self.close()\n\n def disconnect(self):\n self.buttonCode = 1\n self.close()\n\n def doNothing(self):\n self.buttonCode = 0\n self.close()\n", "sub_path": "foqus_lib/gui/uq/stopEnsembleDialog.py", "file_name": "stopEnsembleDialog.py", "file_ext": "py", "file_size_in_byte": 946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.dirname", "line_number": 3, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "PyQt5.uic.loadUiType", "line_number": 5, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 5, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "418619993", "text": "\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nclass Multi_Generator(tf.keras.models.Model):\n def __init__(self, op_type, gens_dict, **kwargs):\n super(Multi_Generator, self).__init__(**kwargs)\n self.gens_dict = gens_dict\n self.op_type = op_type\n ### 目前只有 I_to_Wxyz_to_Cxy_general 有用到\n self.I_to_W_separ = None\n self.I_to_W_focus = None\n self.W_to_C_separ = None\n self.W_to_C_focus = None\n\n def call(self, input_tensor, Mask=None, training=None):\n if (self.op_type == \"I_to_M_and_C\"): ### 最左邊的 I 是只 Model內本身的行為, 不會管 Model外 怎麼包喔, 意思就是 I 在 Model 外可以包成 I_w_M 也行, 反正 Model內都是唯一張img這樣子\n I = input_tensor\n M = self.gens_dict[\"I_to_M\"](input_tensor)\n C = self.gens_dict[\"I_to_C\"](input_tensor)\n return M, C\n\n elif(self.op_type == \"I_to_M_w_I_to_C\"): ### 最左邊的 I 是只 Model內本身的行為, 不會管 Model外 怎麼包喔, 意思就是 I 在 Model 外可以包成 I_w_M 也行, 反正 Model內都是唯一張img這樣子\n I = input_tensor\n M = self.gens_dict[\"I_to_M\"](input_tensor)\n M_w_I = M * I\n C = self.gens_dict[\"M_w_I_to_C\"](M_w_I)\n return M, C\n elif(self.op_type == \"I_to_M_w_I_to_W_to_C\"): ### 最左邊的 I 是只 Model內本身的行為, 不會管 Model外 怎麼包喔, 意思就是 I 在 Model 外可以包成 I_w_M 也行, 反正 Model內都是唯一張img這樣子\n I = input_tensor\n M = self.gens_dict[\"I_to_M\"](input_tensor)\n M_w_I = M * I\n W = self.gens_dict[\"M_w_I_to_W\"](M_w_I)\n C = self.gens_dict[\"W_to_C\"](W)\n return M, W, C\n\n elif(self.op_type == \"I_or_W_to_Cx_Cy\"): ### 最左邊的 I 是只 Model內本身的行為, 不會管 Model外 怎麼包喔, 意思就是 I 在 Model 外可以包成 I_w_M 也行, 反正 
Model內都是唯一張img這樣子\n I = input_tensor\n Cx = self.gens_dict[\"I_to_Cx\"](I)\n Cy = self.gens_dict[\"I_to_Cy\"](I)\n return Cx, Cy ### 這個順序要跟 step8b_useG, step9c_train_step 對應到喔!\n elif(self.op_type == \"I_to_Wx_Wy_Wz\"): ### 最左邊的 I 是只 Model內本身的行為, 不會管 Model外 怎麼包喔, 意思就是 I 在 Model 外可以包成 I_w_M 也行, 反正 Model內都是唯一張img這樣子\n I = input_tensor\n Wx = self.gens_dict[\"I_to_Wx\"](I)\n Wy = self.gens_dict[\"I_to_Wy\"](I)\n Wz = self.gens_dict[\"I_to_Wz\"](I)\n return Wz, Wy, Wx ### 這個順序要跟 step8b_useG, step9c_train_step 對應到喔!\n elif(self.op_type == \"I_to_Wx_Wy_Wz_focus_to_Cx_Cy_focus\"): ### 最左邊的 I 是只 Model內本身的行為, 不會管 Model外 怎麼包喔, 意思就是 I 在 Model 外可以包成 I_w_M 也行, 反正 Model內都是唯一張img這樣子\n '''\n 注意 不能在這邊把 Wxyz concat 起來喔, 因為要分開算 loss!\n '''\n # I_pre = input_tensor\n # Wz_pre_raw, Wy_pre_raw, Wx_pre_raw = self.gens_dict[\"I_to_Wx_Wy_Wz\"](I_pre)\n # W_pre_raw = tf.concat([Wz_pre_raw, Wy_pre_raw, Wx_pre_raw], axis=-1)\n # W_pre_w_M = W_pre_raw * Mask\n\n # Cx_pre_raw, Cy_pre_raw = self.gens_dict[\"W_to_Cx_Cy\"](W_pre_w_M)\n # C_pre_raw = tf.concat([Cy_pre_raw, Cx_pre_raw], axis=-1)\n # C_pre_w_M = C_pre_raw * Mask\n # return W_pre_raw, W_pre_w_M, C_pre_raw, C_pre_w_M\n\n I_pre = input_tensor\n Wz_pre_raw, Wy_pre_raw, Wx_pre_raw = self.gens_dict[\"I_to_Wx_Wy_Wz\"](I_pre)\n W_pre_raw = tf.concat([Wz_pre_raw, Wy_pre_raw, Wx_pre_raw], axis=-1)\n b, h, w, c = W_pre_raw.shape ### 因為想嘗試 no_pad, 所以 pred 可能 size 會跟 gt 差一點點, 就以 pred為主喔!\n W_pre_w_M = W_pre_raw * Mask[:, :h, :w, :]\n\n Cx_pre_raw, Cy_pre_raw = self.gens_dict[\"W_to_Cx_Cy\"](W_pre_w_M)\n\n return Wz_pre_raw, Wy_pre_raw, Wx_pre_raw, Cx_pre_raw, Cy_pre_raw\n\n elif(self.op_type == \"I_to_Wxyz_to_Cxy_general\"): ### 最左邊的 I 是只 Model內本身的行為, 不會管 Model外 怎麼包喔, 意思就是 I 在 Model 外可以包成 I_w_M 也行, 反正 Model內都是唯一張img這樣子\n '''\n 注意 不能在這邊把 Wxyz concat 起來喔, 因為要分開算 loss!\n '''\n I_pre = input_tensor\n ''' W_sep '''\n if(self.I_to_W_separ):\n Wz_pre_raw, Wy_pre_raw, Wx_pre_raw = self.gens_dict[\"I_to_Wx_Wy_Wz\"](I_pre)\n W_pre_raw = tf.concat([Wz_pre_raw, Wy_pre_raw, Wx_pre_raw], axis=-1)\n else:\n W_pre_raw = self.gens_dict[\"I_to_Wx_Wy_Wz\"](I_pre)\n\n ''' W_focus '''\n ### 因為 後面有接網路, W 需要變成 下段網路的輸入, 所以需要 做準備看下段網路需不需要 乘M\n b, h, w, c = W_pre_raw.shape ### 因為想嘗試 no_pad, 所以 pred 可能 size 會跟 gt 差一點點, 就以 pred為主喔!\n if(self.I_to_W_focus): W_pre_w_M = W_pre_raw * Mask[:, :h, :w, :]\n else : W_pre_w_M = W_pre_raw\n\n ''' W_準備return的東西, 不過思考後, return Raw 才對, 所以整段不用了 '''\n ### 應該要return raw回去才對, 不用擔心train沒用到Mask, 因為我在train_step裡面有寫的很OK, 會在那邊才用到Mask\n # Wz_pre_w_M = W_pre_w_M[..., 0:1] ### 用 : 才可以 keepdims\n # Wy_pre_w_M = W_pre_w_M[..., 1:2] ### 用 : 才可以 keepdims\n # Wx_pre_w_M = W_pre_w_M[..., 2:3] ### 用 : 才可以 keepdims\n Wz_pre_raw = W_pre_raw[..., 0:1] ### 用 : 才可以 keepdims\n Wy_pre_raw = W_pre_raw[..., 1:2] ### 用 : 才可以 keepdims\n Wx_pre_raw = W_pre_raw[..., 2:3] ### 用 : 才可以 keepdims\n ###########################################################\n ''' C_sep '''\n if(self.W_to_C_separ):\n Cx_pre_raw, Cy_pre_raw = self.gens_dict[\"W_to_Cx_Cy\"](W_pre_w_M)\n C_pre_raw = tf.concat([Cy_pre_raw, Cx_pre_raw], axis=-1)\n else:\n C_pre_raw = self.gens_dict[\"W_to_Cx_Cy\"](W_pre_w_M)\n\n ''' C_focus '''\n ### 因為 後面沒有接網路了, C 不需要 變成誰的 輸入, 所以 不需要 乘M 囉\n # if(self.W_to_C_focus): C_pre_w_m = C_pre_raw * Mask[:, :h, :w, :]\n # else : C_pre_w_m = C_pre_raw\n\n ''' C_準備return的東西, 不過思考後, return Raw 才對, 所以整段不用了 '''\n ### 應該要return raw回去才對, 不用擔心train沒用到Mask, 因為我在train_step裡面有寫的很OK, 會在那邊才用到Mask\n # Cy_pre_w_M = C_pre_w_m[..., 0:1] ### 用 : 才可以 keepdims\n # Cx_pre_w_M = C_pre_w_m[..., 1:2] ### 用 : 才可以 
keepdims\n Cy_pre_raw = C_pre_raw[..., 0:1] ### 用 : 才可以 keepdims\n Cx_pre_raw = C_pre_raw[..., 1:2] ### 用 : 才可以 keepdims\n\n '''統一拆開來 return '''\n ### 要不然 要區分 I_to_W_separ 和 I_to_W_separ 的 T/F , 要分四種方式 return , 太麻煩了不要找自己麻煩, 統一 拆開來 return\n return Wz_pre_raw, Wy_pre_raw, Wx_pre_raw, Cx_pre_raw, Cy_pre_raw\n\ndef see(model_obj, train_in_pre):\n M_pre, C_pre = model_obj.generator(train_in_pre)\n\n M_visual = (M_pre[0].numpy() * 255.).astype(np.uint8)\n\n from step08_b_use_G_generate_0_util import F_01_or_C_01_method1_visual_op, Value_Range_Postprocess_to_01\n C = Value_Range_Postprocess_to_01(C_pre[0])\n C_visual = F_01_or_C_01_method1_visual_op(C)\n\n fig, ax = plt.subplots(nrows=1, ncols=2)\n ax[0].imshow(M_visual)\n ax[1].imshow(C_visual)\n plt.show()\n\n\nif(__name__ == \"__main__\"):\n import numpy as np\n import time\n from kong_util.tf_model_util import Show_model_layer_names, Show_model_weights\n # data = np.ones(shape=(1, 512, 512, 3), dtype=np.float32)\n # start_time = time.time() # 看資料跑一次花多少時間\n # # test_g = Generator(hid_ch=64, depth_level=7, use_bias=False)\n # test_g = Generator(hid_ch= 128, depth_level=4, out_ch=1, unet_acti=\"sigmoid\", conv_block_num=1, ch_upper_bound= 2**14)\n # test_g(data)\n # print(\"cost time\", time.time() - start_time)\n # test_g.summary()\n # print(test_g(data))\n\n\n\n ############################################################################################################################\n ### 嘗試 真的 load tf_data 進來 train 看看\n import numpy as np\n from tqdm import tqdm\n from step06_a_datas_obj import *\n from step06_cFinal_tf_Data_builder import tf_Data_builder\n from step10_a2_loss_info_obj import Loss_info_builder\n from step09_c_train_step import *\n\n\n from step09_f1_multi_unet2_obj_I_to_M_w_I_to_C import *\n\n # model_obj = try_multi_unet\n model_obj = I_to_M_L4_ch032_and_M_w_I_to_C_L5_ch032\n model_obj = model_obj.build() ### 可替換成 上面 想測試的 model\n print(model_obj)\n\n ### 2. db_obj 和 tf_data\n db_obj = type9_mask_flow_have_bg_dtd_hdr_mix_and_paper.build()\n tf_data = tf_Data_builder().set_basic(db_obj, 1, train_shuffle=False).set_data_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_img_resize(( 512, 512) ).build_by_db_get_method().build()\n\n # ### 3. loss_info_obj\n G_mae_loss_infos = [Loss_info_builder().set_loss_type(\"mae\").build(),\n Loss_info_builder().set_loss_type(\"mae\").build()]\n ### 4. 
跑起來試試看\n for n, (train_in, train_in_pre, train_gt, train_gt_pre, _) in enumerate(tqdm(tf_data.train_db_combine.take(50))):\n model_obj.train_step(model_obj=model_obj, in_data=train_in_pre, gt_data=train_gt_pre, loss_info_objs=G_mae_loss_infos)\n if(n == 0):\n model_obj.generator.summary()\n Show_model_weights(model_obj.generator)\n\n see(model_obj, train_in_pre)\n\n if(n == 2):\n print(model_obj.generator.gens_dict)\n ckpt_I_to_M = tf.train.Checkpoint(generator=model_obj.generator.gens_dict[\"I_to_M\"])\n ckpt_M_w_I_to_C = tf.train.Checkpoint(generator=model_obj.generator.gens_dict[\"M_w_I_to_C\"])\n\n ckpt_path_I_to_M = \"F:/kong_model2/data_dir/result/6_mask_unet/5_2_bce_block1_45678l/type8_blender-2_4l_ch032-flow_unet2-block1_ch032_sig_bce_s001_4l_ep060_copy-20211204_203747/ckpt\"\n ckpt_path_I_w_M_to_C = \"F:/kong_model2/data_dir/result/7_flow_unet/5_2_mae_block1_45678l_I_with_Mgt_to_C/type8_blender_os_book-2_L5_ch032-flow_unet2-block1_L5_ch032_mae_s001-20211125_170346/ckpt\"\n\n ckpt_read_manager_I_to_M = tf.train.CheckpointManager(ckpt_I_to_M, ckpt_path_I_to_M, max_to_keep=1)\n ckpt_read_manager_M_w_I_to_C = tf.train.CheckpointManager(ckpt_M_w_I_to_C, ckpt_path_I_w_M_to_C, max_to_keep=1)\n\n ckpt_I_to_M. restore(ckpt_read_manager_I_to_M.latest_checkpoint)\n ckpt_M_w_I_to_C.restore(ckpt_read_manager_M_w_I_to_C.latest_checkpoint)\n print(\"ckpt_read_manager_I_to_M.latest_checkpoint:\", ckpt_read_manager_I_to_M.latest_checkpoint)\n\n see(model_obj, train_in_pre)\n\n\n if(n == 10):\n model_obj.generator.save_weights(\"debug_data/try_save/weights\")\n iter10 = model_obj.generator.layers[0].weights[1]\n print(\"iter10:\", iter10)\n if(n == 20):\n iter20 = model_obj.generator.layers[0].weights[1]\n print(\"iter20:\", iter20)\n model_obj.generator.load_weights(\"debug_data/try_save/weights\")\n iter20_load10 = model_obj.generator.layers[0].weights[1]\n print(\"iter20_load10:\", iter20_load10)\n", "sub_path": "step07_b_0b_Multi_UNet.py", "file_name": "step07_b_0b_Multi_UNet.py", "file_ext": "py", "file_size_in_byte": 12073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tensorflow.keras", "line_number": 5, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 102, "usage_type": "call"}, {"api_name": "step08_b_use_G_generate_0_util.Value_Range_Postprocess_to_01", "line_number": 128, "usage_type": "call"}, {"api_name": "step08_b_use_G_generate_0_util.F_01_or_C_01_method1_visual_op", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "step06_cFinal_tf_Data_builder.tf_Data_builder", "line_number": 171, "usage_type": "call"}, {"api_name": "step10_a2_loss_info_obj.Loss_info_builder", "line_number": 174, "usage_type": "call"}, {"api_name": "step10_a2_loss_info_obj.Loss_info_builder", "line_number": 175, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 177, "usage_type": "call"}, {"api_name": "kong_util.tf_model_util.Show_model_weights", "line_number": 181, "usage_type": "call"}, {"api_name": 
"tensorflow.train.Checkpoint", "line_number": 187, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 187, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Checkpoint", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tensorflow.train.CheckpointManager", "line_number": 193, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 193, "usage_type": "attribute"}, {"api_name": "tensorflow.train.CheckpointManager", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 194, "usage_type": "attribute"}]} +{"seq_id": "650979663", "text": "import pandas as pd\nimport numpy as np\nfrom tabulate import tabulate\n\nHOME = \"/future/u/jtguibas/abae/data/\"\n\nclass Records:\n def __init__(self, k, proxy_scores, statistics, predicates):\n assert(proxy_scores.shape == predicates.shape == statistics.shape)\n self.k = k\n self.proxy_scores = proxy_scores\n self.statistics = statistics\n self.predicates = predicates\n\n self.sort = np.argsort(proxy_scores)\n self.statistics_sorted = statistics[self.sort]\n self.predicates_sorted = predicates[self.sort]\n self.ground_truth = statistics[predicates].mean()\n \n self.p = np.sum(self.predicates) / len(self.proxy_scores)\n self.sigma = np.std(self.statistics[self.predicates])\n self.strata = np.array_split(np.arange(self.sort.shape[0]), self.k)\n self.ps = np.zeros(self.k)\n self.sigmas = np.zeros(self.k)\n self.ms = np.zeros(self.k)\n for i in range(self.k):\n stratum = self.strata[i]\n _statistics = self.statistics_sorted[stratum].copy()\n _predicates = self.predicates_sorted[stratum].copy()\n self.ps[i] = np.sum(_predicates) / len(_predicates)\n self.sigmas[i] = np.std(_statistics[_predicates])\n self.ms[i] = np.mean(_statistics[_predicates])\n\n def sample(self, n, k=None):\n if k is None:\n sample_idxs = np.random.choice(self.sort.shape[0], n, replace=False)\n else:\n strata = np.array_split(np.arange(self.sort.shape[0]), self.k)[k]\n sample_idxs = np.random.choice(strata, n, replace=False)\n\n statistics = self.statistics_sorted[sample_idxs].copy()\n predicates = self.predicates_sorted[sample_idxs].copy()\n return statistics, predicates\n \n def summary(self):\n num_records = len(self.proxy_scores)\n table = tabulate(\n [\n [\"NUM_RECORDS\", num_records],\n [\"K\", self.k],\n [\"P_S\", np.round(self.ps.sum() / self.k, 5)],\n [\"SIGMA_S\", np.round(self.sigma, 5)],\n [\"P_K\", np.round(self.ps, 5)],\n [\"SIGMA_K\", np.round(self.sigmas, 5)],\n [\"M_K\", np.round(self.ms, 5)],\n ],\n headers=[\"Key\", \"Value\"]\n )\n print(table)\n \n \nclass JacksonRecords(Records):\n def __init__(self, k):\n self.name = \"jackson\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass JacksonRedLightRecords(Records):\n def __init__(self, k):\n self.name = \"jackson_red_light\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass AmazonOfficeSuppliesRecords(Records):\n def __init__(self, k):\n self.name = \"amazon_office\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = 
df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass MovieFacesV2Records(Records):\n def __init__(self, k):\n self.name = \"moviefacesv2\"\n proxy_scores = np.load(\"/future/u/jtguibas/aggpred/data/movie-faces-proxy-score-v3.npy\")[:, 0]\n predicates = np.load(\"/future/u/jtguibas/aggpred/data/movie-faces-predicates-v2.npy\")\n statistics = np.load(\"/future/u/jtguibas/aggpred/data/movie-faces-statistics-v2.npy\")\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass CelebARecords(Records):\n def __init__(self, k):\n self.name = \"celeba\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates) \n \n\nclass Trec05PRecords(Records):\n def __init__(self, k):\n self.name = \"trec05p\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates)\n \nclass TaipeiRecords(Records):\n def __init__(self, k):\n self.name = \"taipei\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates) \n \n \nclass SyntheticRecords(Records):\n def __init__(self, k, alpha=0.1, beta=0.5, N=1000000):\n self.name = \"synthetic\"\n rng = np.random.RandomState(3212142)\n proxy_scores = rng.beta(alpha, beta, size=N)\n statistics = rng.normal(10, 3, N)\n predicates = rng.binomial(n=1, p=proxy_scores).astype(bool)\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass SyntheticControlRecords(Records):\n def __init__(self, k, ps, sigmas, ms, N=1000000):\n self.name = \"synthetic_control\"\n rng = np.random.RandomState(3212142)\n strata_size = N // k\n proxy_scores = []\n statistics = np.concatenate([rng.normal(ms[i], sigmas[i], N // k) for i in range(k)])\n predicates = []\n for i in range(k):\n a = rng.binomial(n=1, p=[ps[i]]*strata_size)\n c = rng.binomial(n=strata_size, p=a).astype(bool)\n proxy_scores.append(a)\n predicates.append(c)\n proxy_scores = np.arange(N)\n predicates = np.concatenate(predicates)\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass SyntheticComplexPredicatesRecords(Records):\n def __init__(self, k, version=\"opt\"):\n self.name = f\"synthetic_complex_predicates_{version}\"\n N = 1000000\n rng = np.random.RandomState(3212142)\n\n proxy_scores_a = rng.beta(0.4, 1, N)\n proxy_scores_b = rng.beta(0.2, 1, N)\n\n proxy_scores_gt = proxy_scores_a * proxy_scores_b\n\n if version == \"opt\":\n proxy_scores = proxy_scores_gt.copy()\n elif version == \"left\":\n proxy_scores = proxy_scores_a\n elif version == \"right\":\n proxy_scores = proxy_scores_b\n else:\n raise NotImplementedError\n\n statistics = rng.normal(10, 3, N)\n predicates = np.zeros(N)\n predicates = rng.binomial(n=1, p=proxy_scores_gt)\n\n predicates = predicates.astype(bool)\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass JacksonRedLightMultProxyRecords(Records):\n def __init__(self, k):\n self.name = \"jackson_red_light_mult\"\n df = pd.read_csv(HOME + 
f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass JacksonRedLightCarProxyRecords(Records):\n def __init__(self, k):\n self.name = \"jackson_red_light_car_proxy\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates)\n \n \nclass JacksonRedLightLightProxyRecords(Records):\n def __init__(self, k):\n self.name = \"jackson_red_light_light_proxy\"\n df = pd.read_csv(HOME + f\"{self.name}.csv\")\n proxy_scores = df[\"proxy_scores\"].to_numpy()\n statistics = df[\"statistics\"].to_numpy()\n predicates = df[\"predicates\"].to_numpy()\n super().__init__(k, proxy_scores, statistics, predicates)", "sub_path": "abae/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 8358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.argsort", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.array_split", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tabulate.tabulate", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 133, 
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.random.RandomState", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 179, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 199, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 209, "usage_type": "call"}]} +{"seq_id": "275392939", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 15 18:15:03 2020\r\n\r\n@author: Lishen Qiu\r\n\"\"\"\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch\r\nfrom numpy.linalg import svd\r\nfrom numpy.random import normal\r\nimport math\r\nfrom math import sqrt\r\nfrom torchsummary import summary\r\nimport scipy.io as io\r\nimport numpy as np\r\nimport torch.optim as optim\r\nimport torch.utils.data \r\nimport torch\r\nimport os\r\nimport os.path as osp\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\n\r\nclass ChannelAttention(nn.Module):\r\n def __init__(self, in_planes, ratio):\r\n super(ChannelAttention, self).__init__()\r\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\r\n self.max_pool = nn.AdaptiveMaxPool2d(1)\r\n self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)\r\n self.relu1 = nn.ReLU()\r\n self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\r\n max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\r\n out = avg_out + max_out\r\n return self.sigmoid(out)\r\n\r\nclass conv_1_block(nn.Module):\r\n \"\"\"\r\n Convolution Block \r\n \"\"\"\r\n def __init__(self, in_ch, out_ch,kernel_size_L,kernel_size_W,stride):\r\n super(conv_1_block, self).__init__()\r\n \r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(in_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True),\r\n )\r\n\r\n def forward(self, x):\r\n x1 = self.conv1(x)\r\n return x1\r\n \r\nclass conv_2_block(nn.Module):\r\n \"\"\"\r\n Convolution Block \r\n \"\"\"\r\n def __init__(self, in_ch, out_ch,kernel_size_L,kernel_size_W,stride):\r\n super(conv_2_block, self).__init__()\r\n \r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(in_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True),\r\n nn.ReLU(inplace=True),\r\n )\r\n self.conv2 = nn.Sequential(\r\n nn.Conv2d(out_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True),\r\n nn.ReLU(inplace=True),\r\n )\r\n self.ca =ChannelAttention(out_ch,8)\r\n self.sp =SpatialAttention(kernel_size_L,kernel_size_W,stride=1)\r\n def forward(self, x):\r\n \r\n x1 = self.conv1(x)\r\n x1 = self.ca(x1)* x1\r\n x1 = self.conv2(x1)\r\n x1 = self.ca(x1)* x1\r\n \r\n xout=x1+x\r\n return xout\r\n \r\n\r\n \r\n \r\nclass 
DRnet(nn.Module):#库中的torch.nn.Module模块\r\n def __init__(self,in_channels =1):\r\n super(DRnet, self).__init__()\r\n \r\n \r\n self.conv1_1=conv_1_block( 2, 32, kernel_size_L=1,kernel_size_W=3, stride=1)\r\n self.conv1_2=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=3, stride=1)\r\n self.conv1_3=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=3, stride=1)\r\n self.conv1_4=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=3, stride=1)\r\n \r\n self.conv2_1=conv_1_block( 2, 32, kernel_size_L=1,kernel_size_W=5, stride=1)\r\n self.conv2_2=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=5, stride=1)\r\n self.conv2_3=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=5, stride=1)\r\n self.conv2_4=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=5, stride=1)\r\n \r\n self.conv3_1=conv_1_block( 2, 32, kernel_size_L=1,kernel_size_W=13, stride=1)\r\n self.conv3_2=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=13, stride=1)\r\n self.conv3_3=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=13, stride=1)\r\n self.conv3_4=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=13, stride=1)\r\n \r\n self.conv4_1=conv_1_block( 2, 32, kernel_size_L=1,kernel_size_W=15, stride=1)\r\n self.conv4_2=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=15, stride=1)\r\n self.conv4_3=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=15, stride=1)\r\n self.conv4_4=conv_2_block(32, 32, kernel_size_L=1,kernel_size_W=15, stride=1)\r\n \r\n self.conv1m1_1 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=(1,3),padding=(0,1)) \r\n self.conv1m1_2 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=(1,5),padding=(0,2)) \r\n self.conv1m1_3 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=(1,13),padding=(0,6)) \r\n self.conv1m1_4 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=(1,15),padding=(0,7)) \r\n \r\n def forward(self, x):\r\n\r\n x1 = self.conv1_1(x)\r\n x1 = self.conv1_2(x1)\r\n x1 = self.conv1_3(x1)\r\n x1 = self.conv1_4(x1)\r\n x1 = self.conv1m1_1(x1)\r\n \r\n x2 = self.conv2_1(x)\r\n x2 = self.conv2_2(x2)\r\n x2 = self.conv2_3(x2)\r\n x2 = self.conv2_4(x2)\r\n x2 = self.conv1m1_2(x2)\r\n\r\n x3 = self.conv3_1(x)\r\n x3 = self.conv3_2(x3)\r\n x3 = self.conv3_3(x3)\r\n x3 = self.conv3_4(x3)\r\n x3 = self.conv1m1_3(x3)\r\n\r\n x4 = self.conv4_1(x)\r\n x4 = self.conv4_2(x4)\r\n x4 = self.conv4_3(x4)\r\n x4 = self.conv4_4(x4)\r\n x4 = self.conv1m1_4(x4)\r\n\r\n\r\n Xout=x1+x2+x3+x4\r\n\r\n return Xout\r\n \r\n \r\n\r\n\r\n\r\n", "sub_path": "Stage2_model3.py", "file_name": "Stage2_model3.py", "file_ext": "py", "file_size_in_byte": 5434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.nn.Module", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveMaxPool2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, 
"usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "414851731", "text": "import datetime\nfrom django.db.models import Sum, Count\nfrom telebot import types\nfrom django.core.management.base import BaseCommand, CommandError\nfrom monitoring.models import *\nimport telebot\n\n# from monitoring.models import Worker\n\ntoken = ''\n\nbot = telebot.TeleBot(token)\n\npools_name = list(UserPools.objects.values('name'))\nworkers_name = list(Worker.objects.values('name'))\n\n\ndef is_in(name, variable):\n for item in variable:\n if item['name'] == name:\n return True\n return False\n\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n # def add_arguments(self, parser):\n # parser.add_argument('message', nargs='+', type=str)\n\n def handle(self, *args, **options):\n bot.polling(none_stop=True)\n\n\n# 240111655\n# Обработчик команд '/start' и '/help'.\n@bot.message_handler(commands=['help'])\ndef handle_help(message):\n markup = types.ReplyKeyboardMarkup()\n markup.row('/pools')\n markup.row('/pools_info')\n markup.row('/pools_details')\n markup.row('/workers')\n markup.row('/workers_info')\n markup.row('/worker_details')\n # sendtext = ''\n bot.send_message(message.chat.id, \"Commands:\", reply_markup=markup)\n 
pass\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef new_message(message): # Название функции не играет никакой роли, в принципе\n workers = Worker.objects.filter(last_update__lt=(datetime.datetime.now() - datetime.timedelta(minutes=5)))\n for i in workers:\n print(i.name)\n print(datetime.datetime.now() - datetime.timedelta(minutes=5))\n com = message.text\n print(com)\n\n\n if com == '/pools':\n\n pools = UserPools.objects.all()\n sendtext = ''\n for i in pools:\n sendtext = sendtext + i.name + \\\n '\\r\\n\\r\\n'\n bot.send_message(message.chat.id, sendtext)\n\n\n if com == '/pools_info':\n\n pools = UserPools.objects.all()\n sendtext = ''\n for i in pools:\n total_hash_pool = Worker.objects.filter(address_pool=i).aggregate(Sum('reported_hash_rate'))[\n 'reported_hash_rate__sum']\n total_hash_claymore_base = Worker.objects.filter(address_pool=i).aggregate(Sum('sum_hr_base'))[\n 'sum_hr_base__sum']\n amount_workers = \\\n Worker.objects.filter(address_pool=i).filter(reported_hash_rate__gt=0).aggregate(Count('name'))[\n 'name__count']\n sendtext = sendtext + i.name + '\\r\\n' + \\\n 'Address: ' + i.address + ':\\r\\n' + \\\n 'Active workers: ' + str(amount_workers) + ';\\r\\n' + \\\n 'Pool hashrate: ' + str(total_hash_pool) + 'Mh/s;\\r\\n' + \\\n 'Claymore hashrate: ' + str(total_hash_claymore_base) + 'Mh/s' + \\\n '\\r\\n\\r\\n'\n bot.send_message(message.chat.id, sendtext)\n\n\n if com == '/pools_details':\n\n pools = UserPools.objects.all()\n markup = types.ReplyKeyboardMarkup()\n for i in pools:\n markup.row(i.name)\n bot.send_message(message.chat.id, \"Choose pool: \", reply_markup=markup)\n\n\n if is_in(com, pools_name) == True:\n\n pool = UserPools.objects.get(name=com)\n sendtext = ''\n total_hash_pool = \\\n Worker.objects.filter(address_pool=pool).aggregate(Sum('reported_hash_rate'))[\n 'reported_hash_rate__sum']\n total_hash_claymore_base = \\\n Worker.objects.filter(address_pool=pool).aggregate(Sum('sum_hr_base'))['sum_hr_base__sum']\n amount_workers = \\\n Worker.objects.filter(address_pool=pool).filter(reported_hash_rate__gt=0).aggregate(\n Count('name'))['name__count']\n offline_workers = \\\n Worker.objects.filter(address_pool=pool).filter(reported_hash_rate=0).aggregate(\n Count('name'))['name__count']\n sendtext = sendtext + pool.name + '\\r\\n' + \\\n 'Address: ' + pool.address + ':\\r\\n' + \\\n 'Active workers: ' + str(amount_workers) + ';\\r\\n' + \\\n 'Offline workers: ' + str(offline_workers) + ';\\r\\n' + \\\n 'Pool hashrate: ' + str(total_hash_pool) + 'Mh/s;\\r\\n' + \\\n 'Claymore hashrate: ' + str(total_hash_claymore_base) + 'Mh/s\\r\\n\\r\\n\\r\\n'\n\n workers = Worker.objects.filter(address_pool=pool).values('name',\n 'reported_hash_rate').distinct().order_by(\n 'name')\n for i in workers:\n sendtext = sendtext + i['name'] + ' (' + str(i['reported_hash_rate']) + 'Mh/s' + ')' \\\n '\\r\\n\\r\\n'\n bot.send_message(message.chat.id, sendtext)\n\n markup = types.ReplyKeyboardMarkup()\n markup.row('/pools')\n markup.row('/pools_info')\n markup.row('/pools_details')\n markup.row('/workers')\n markup.row('/workers_info')\n markup.row('/worker_details')\n # sendtext = ''\n\n\n if com == '/workers':\n\n workers = Worker.objects.values('name').distinct().order_by('name')\n sendtext = ''\n for i in workers:\n sendtext = sendtext + i['name'] + \\\n '\\r\\n\\r\\n'\n bot.send_message(message.chat.id, sendtext)\n\n\n if com == '/workers_info':\n\n workers = Worker.objects.all().order_by('address_pool__name', 'name')\n sendtext = ''\n for i in workers:\n sendtext = 
sendtext + i.name + '\\r\\n' + \\\n 'Pools: ' + str(i.pools) + '\\r\\n' + \\\n 'Claymore base hashrate: ' + str(i.sum_hr_base) + 'Mh/s;\\r\\n' + \\\n 'Claymore sec hashrate: ' + str(i.sum_hr_sec) + 'Mh/s;\\r\\n' + \\\n 'Pool hashrate: ' + str(i.reported_hash_rate) + 'Mh/s;\\r\\n' + \\\n 'Claymore uptime: ' + str(datetime.timedelta(minutes=i.claymore_uptime)) + \\\n '\\r\\n\\r\\n'\n bot.send_message(message.chat.id, sendtext)\n\n\n if com == '/worker_details':\n\n workers = Worker.objects.values('name').distinct().order_by('name')\n markup = types.ReplyKeyboardMarkup()\n for i in workers:\n markup.row(i['name'])\n bot.send_message(message.chat.id, \"Choose rig: \", reply_markup=markup)\n\n if is_in(com, workers_name) == True:\n worker = Worker.objects.filter(name=com)\n sendtext = ''\n for i in worker:\n if i.sum_hr_base == 0 and i.reported_hash_rate == 0:\n sendtext = sendtext + i.name + ' OFFLINE'\n else:\n sendtext = sendtext + i.name + '\\r\\n' + \\\n 'Pools: ' + str(i.pools) + '\\r\\n' + \\\n 'Base pool name: ' + i.address_pool.name + '\\r\\n' + \\\n 'Pool hashrate: ' + str(i.reported_hash_rate) + 'Mh/s;\\r\\n' + \\\n 'Claymore base hashrate: ' + str(\n i.sum_hr_base) + 'Mh/s ' + '(' + i.hr_details_base + ');\\r\\n' + \\\n 'Claymore sec hashrate: ' + str(\n i.sum_hr_sec) + 'Mh/s ' + '(' + i.hr_details_sec + ');\\r\\n' + \\\n 'Temperatures: ' + i.temperature + ';\\r\\n' + \\\n 'Fun speed: ' + i.fun_speed + ';\\r\\n' + \\\n 'System uptime: ' + str(datetime.timedelta(seconds=i.uptime)) + ';\\r\\n' + \\\n 'Claymore uptime: ' + str(\n datetime.timedelta(minutes=i.claymore_uptime)) + '\\r\\n' + \\\n 'Version: ' + i.claymore_version + \\\n '\\r\\n\\r\\n'\n bot.send_message(message.chat.id, sendtext)\n\n\n if com == 'список_пулов':\n pools = UserPools.objects.all()\n sendtext = ''\n markup = types.ReplyKeyboardMarkup()\n for i in pools:\n # sendtext = i.name + ' ' + i.address\n markup.row(i.name + ' ' + i.address)\n # sendtext = 'sdsdsd'\n print(sendtext)\n bot.send_message(message.chat.id, \"выбери пул: \", reply_markup=markup)\n\n if com == 'список_ригов':\n workers = Worker.objects.all()\n sendtext = ''\n markup = types.ReplyKeyboardMarkup()\n for i in workers:\n sendtext = i.name\n # sendtext = 'sdsdsd'\n print(sendtext)\n markup.row(i.name)\n bot.send_message(message.chat.id, \"выбери риг: \", reply_markup=markup)\n # worker = Worker.objects.get(name=com)\n # sendtext = worker.sum_hr_base\n #\n # bot.send_message(message.chat.id, sendtext)\n # markup = types.ReplyKeyboardMarkup()\n # markup.row('эти', 'сверху')\n # markup.row('эти', 'всередине')\n # markup.row('эти', 'снизу')\n # bot.send_message(message.chat.id, \"Клацни ёпт:\", reply_markup=markup)\n\n # print(bot)\n # print(message)\n\n #\n # if __name__ == '__main__':\n #\n", "sub_path": "monitoring/management/commands/telegram_bot.py", "file_name": "telegram_bot.py", "file_ext": "py", "file_size_in_byte": 9190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "telebot.TeleBot", "line_number": 12, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 25, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 39, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "attribute"}, {"api_name": 
"datetime.timedelta", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 81, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 95, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 95, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 112, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 115, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 131, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 131, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 161, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 169, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 169, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 191, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 193, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 202, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 202, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 213, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 213, "usage_type": "name"}]} +{"seq_id": "113015558", "text": "##(IMPLEMENTATION) Specify Data Loaders\r\n\r\n'''Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. 
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!'''\r\n\r\n\r\nimport os\r\nfrom torchvision import datasets, transforms\r\nimport torch\r\nimport numpy as np\r\n\r\nfrom PIL import Image,ImageFile\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\n### TODO: Write data loaders for training, validation, and test sets\r\n## Specify appropriate transforms, and batch_sizes\r\nuse_cuda = torch.cuda.is_available()\r\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n \r\ndata_transform = {\r\n 'train' : transforms.Compose([\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(), \r\n transforms.ToTensor(),\r\n normalize,\r\n ]),\r\n 'test' : transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224), \r\n transforms.ToTensor(),\r\n normalize,\r\n ]),\r\n 'valid' : transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224), \r\n transforms.ToTensor(),\r\n normalize,\r\n ]),\r\n} \r\n\r\n\r\ntrain_dataset = datasets.ImageFolder(root='./dogImages/train',\r\n transform=data_transform['train'])\r\ntrain_loader = torch.utils.data.DataLoader(train_dataset,\r\n batch_size=20, shuffle=True,\r\n num_workers=0)\r\n\r\ntest_dataset = datasets.ImageFolder(root='./dogImages/test',\r\n transform=data_transform['test'])\r\ntest_loader = torch.utils.data.DataLoader(test_dataset,\r\n batch_size=20,\r\n num_workers=0)\r\n\r\nvalid_dataset = datasets.ImageFolder(root='./dogImages/valid',\r\n transform=data_transform['valid'])\r\nvalid_loader = torch.utils.data.DataLoader(valid_dataset,\r\n batch_size=20,\r\n num_workers=0)\r\n\r\nloaders_scratch = {\r\n 'train': train_loader,\r\n 'valid': valid_loader,\r\n 'test': test_loader\r\n}\r\n\r\n\r\n### (IMPLEMENTATION) Model Architecture\r\n\r\n'''Use transfer learning to create a CNN to classify dog breed. 
Use the code cell below, and save your initialized model as the variable `model_transfer`.'''\r\n\r\nimport torchvision.models as models\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport dill as dill\r\n\r\n## TODO: Specify model architecture \r\nmodel_transfer = models.resnet18(pretrained=True)\r\n\r\n# Freeze training for all \"features\" layers\r\nfor param in model_transfer.parameters():\r\n param.requires_grad = False\r\n\r\nn_inputs = model_transfer.fc.in_features\r\nlast_layer = nn.Linear(n_inputs, len(train_dataset.classes))\r\nmodel_transfer.fc = last_layer\r\n\r\nif use_cuda:\r\n model_transfer = model_transfer.cuda()\r\n\r\n\r\n### (IMPLEMENTATION) Train and Validate the Model\r\n##\r\ndef train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):\r\n \"\"\"returns trained model\"\"\"\r\n # initialize tracker for minimum validation loss\r\n valid_loss_min = np.Inf\r\n\r\n for epoch in range(1, n_epochs + 1):\r\n # initialize variables to monitor training and validation loss\r\n train_loss = 0.0\r\n valid_loss = 0.0\r\n\r\n ###################\r\n # train the model #\r\n ###################\r\n model.train()\r\n for batch_idx, (data, target) in enumerate(loaders['train']):\r\n # move to GPU\r\n if use_cuda:\r\n data, target = data.cuda(), target.cuda()\r\n ## find the loss and update the model parameters accordingly\r\n ## record the average training loss, using something like\r\n ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))\r\n # clear the gradients of all optimized variables\r\n optimizer.zero_grad()\r\n # forward pass: compute predicted outputs by passing inputs to the model\r\n output = model(data)\r\n # calculate the batch loss\r\n loss = criterion(output, target)\r\n # backward pass: compute gradient of the loss with respect to model parameters\r\n loss.backward()\r\n # perform a single optimization step (parameter update)\r\n optimizer.step()\r\n # update training loss\r\n train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))\r\n\r\n ######################\r\n # validate the model #\r\n ######################\r\n model.eval()\r\n for batch_idx, (data, target) in enumerate(loaders['valid']):\r\n # move to GPU\r\n if use_cuda:\r\n data, target = data.cuda(), target.cuda()\r\n ## update the average validation loss\r\n # forward pass: compute predicted outputs by passing inputs to the model\r\n output = model(data)\r\n # calculate the batch loss\r\n loss = criterion(output, target)\r\n # update average validation loss\r\n valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))\r\n\r\n # train_loss and valid_loss already hold running averages of the\r\n # per-batch losses, so no further division by the dataset size\r\n # is needed before printing the epoch statistics\r\n\r\n # print training/validation statistics\r\n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(\r\n epoch,\r\n train_loss,\r\n valid_loss\r\n ))\r\n\r\n ## TODO: save the model if validation loss has decreased\r\n # save model if validation loss has decreased\r\n if valid_loss <= valid_loss_min:\r\n print('Validation loss decreased ({:.6f} --> {:.6f}). 
Saving model ...'.format(\r\n valid_loss_min,\r\n valid_loss))\r\n torch.save(model.state_dict(), save_path)\r\n valid_loss_min = valid_loss\r\n # return trained model\r\n return model\r\n\r\n\r\n### (IMPLEMENTATION) Specify Loss Function and Optimizer\r\n\r\n'''Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below'''\r\n\r\ncriterion_transfer = nn.CrossEntropyLoss()\r\noptimizer_transfer = optim.Adam(filter(lambda par: par.requires_grad,model_transfer.parameters()))\r\n\r\n### (IMPLEMENTATION) Train and Validate the Model\r\n\r\n'''Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. '''\r\n\r\n# train the model\r\nloaders_transfer = loaders_scratch\r\nn_epochs = 10\r\n\r\nmodel_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')\r\n\r\n# load the model that got the best validation accuracy (uncomment the line below)\r\nmodel_transfer.load_state_dict(torch.load('model_transfer.pt'))\r\n\r\n\r\ntorch.save(model_transfer, './model_transfer_v01.pt' )\r\n\r\ntorch.save(model_transfer, './model_transfer_v01.pt', pickle_module=dill)\r\n\r\n\r\n### (IMPLEMENTATION) Test the Model\r\n\r\n'''Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. '''\r\n\r\n\r\ndef test(loaders, model, criterion, use_cuda):\r\n\r\n # monitor test loss and accuracy\r\n test_loss = 0.\r\n correct = 0.\r\n total = 0.\r\n\r\n model.eval()\r\n for batch_idx, (data, target) in enumerate(loaders['test']):\r\n # move to GPU\r\n if use_cuda:\r\n data, target = data.cuda(), target.cuda()\r\n # forward pass: compute predicted outputs by passing inputs to the model\r\n output = model(data)\r\n # calculate the loss\r\n loss = criterion(output, target)\r\n # update average test loss \r\n test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))\r\n # convert output probabilities to predicted class\r\n pred = output.data.max(1, keepdim=True)[1]\r\n # compare predictions to true label\r\n correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())\r\n total += data.size(0)\r\n \r\n print('Test Loss: {:.6f}\\n'.format(test_loss))\r\n\r\n print('\\nTest Accuracy: %2d%% (%2d/%2d)' % (\r\n 100. 
* correct / total, correct, total))\r\n\r\n# call test function \r\ntest(loaders_transfer, model_transfer, criterion_transfer, use_cuda)\r\n\r\n", "sub_path": "Main_train_step01.py", "file_name": "Main_train_step01.py", "file_ext": "py", "file_size_in_byte": 9177, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 11, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 11, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 40, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 46, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 52, "usage_type": "name"}, {"api_name": "torchvision.models.resnet18", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.save", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 158, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 168, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.nn.load", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 182, "usage_type": "name"}, {"api_name": "torch.nn.save", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "name"}, {"api_name": "torch.nn.save", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 187, "usage_type": "name"}]} +{"seq_id": "191716099", "text": "from django.conf.urls import patterns, include, url\n\nfrom django.contrib.gis import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # url(r'^$', 'quester.views.home', name='home'),\n # url(r'^quester/', include('quester.foo.urls')),\n\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'defaults.views.home', name='home'),\n url(r'^logout/$', 'defaults.views.logout', name='logout-page'),\n url(r'', include('social_auth.urls')),\n url(r'^ajax/get_user_location', 'defaults.views.get_user_location', name='get_user_location'),\n url(r'^ajax/set_user_location', 'defaults.views.set_user_location', name='set_user_location'),\n url(r'^ajax/nearest_quests', 'quest.views.nearest_quests', name='nearest_quests'),\n url(r'^ajax/quest_form', 'quest.views.quest_form', name='quest_form'),\n url(r'^ajax/marker_fullinfo', 'quest.views.marker_fullinfo', name='marker_fullinfo'),\n)\n", "sub_path": "src/quester/quester/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.contrib.gis.admin.autodiscover", "line_number": 4, "usage_type": "call"}, {"api_name": 
"django.contrib.gis.admin", "line_number": 4, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.gis.admin.site", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "359033310", "text": "from datetime import datetime, timedelta\n\nfrom freezegun import freeze_time\n\nfrom app.dao.inbound_sms_dao import (\n dao_get_inbound_sms_for_service,\n dao_count_inbound_sms_for_service,\n delete_inbound_sms_created_more_than_a_week_ago,\n dao_get_inbound_sms_by_id\n)\nfrom tests.app.db import create_inbound_sms, create_service\n\nfrom app.models import InboundSms\n\n\ndef test_get_all_inbound_sms(sample_service):\n inbound = create_inbound_sms(sample_service)\n\n res = dao_get_inbound_sms_for_service(sample_service.id)\n assert len(res) == 1\n assert res[0] == inbound\n\n\ndef test_get_all_inbound_sms_when_none_exist(sample_service):\n res = dao_get_inbound_sms_for_service(sample_service.id)\n assert len(res) == 0\n\n\ndef test_get_all_inbound_sms_limits_and_orders(sample_service):\n with freeze_time('2017-01-01'):\n one = create_inbound_sms(sample_service)\n with freeze_time('2017-01-03'):\n three = create_inbound_sms(sample_service)\n with freeze_time('2017-01-02'):\n two = create_inbound_sms(sample_service)\n\n res = dao_get_inbound_sms_for_service(sample_service.id, limit=2)\n assert len(res) == 2\n assert res[0] == three\n assert res[0].created_at == datetime(2017, 1, 3)\n assert res[1] == two\n assert res[1].created_at == datetime(2017, 1, 2)\n\n\ndef test_get_all_inbound_sms_filters_on_service(notify_db_session):\n service_one = create_service(service_name='one')\n service_two = create_service(service_name='two')\n\n sms_one = create_inbound_sms(service_one)\n sms_two = create_inbound_sms(service_two)\n\n res = dao_get_inbound_sms_for_service(service_one.id)\n assert len(res) == 1\n assert res[0] == sms_one\n\n\ndef test_count_inbound_sms_for_service(notify_db_session):\n service_one = create_service(service_name='one')\n service_two = create_service(service_name='two')\n\n create_inbound_sms(service_one)\n create_inbound_sms(service_one)\n create_inbound_sms(service_two)\n\n assert dao_count_inbound_sms_for_service(service_one.id) == 2\n\n\n@freeze_time(\"2017-01-01 12:00:00\")\ndef test_should_delete_inbound_sms_older_than_seven_days(sample_service):\n older_than_seven_days = datetime.utcnow() - 
timedelta(days=7, seconds=1)\n create_inbound_sms(sample_service, created_at=older_than_seven_days)\n delete_inbound_sms_created_more_than_a_week_ago()\n\n assert len(InboundSms.query.all()) == 0\n\n\n@freeze_time(\"2017-01-01 12:00:00\")\ndef test_should_not_delete_inbound_sms_before_seven_days(sample_service):\n yesterday = datetime.utcnow() - timedelta(days=1)\n just_before_seven_days = datetime.utcnow() - timedelta(days=6, hours=23, minutes=59, seconds=59)\n older_than_seven_days = datetime.utcnow() - timedelta(days=7, seconds=1)\n\n create_inbound_sms(sample_service, created_at=yesterday)\n create_inbound_sms(sample_service, created_at=just_before_seven_days)\n create_inbound_sms(sample_service, created_at=older_than_seven_days)\n\n delete_inbound_sms_created_more_than_a_week_ago()\n\n assert len(InboundSms.query.all()) == 2\n\n\ndef test_get_inbound_sms_by_id_returns(sample_service):\n inbound = create_inbound_sms(sample_service)\n\n inbound_from_db = dao_get_inbound_sms_by_id(sample_service.id, inbound.id)\n\n assert inbound == inbound_from_db\n", "sub_path": "tests/app/dao/test_inbound_sms_dao.py", "file_name": "test_inbound_sms_dao.py", "file_ext": "py", "file_size_in_byte": 3276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tests.app.db.create_inbound_sms", "line_number": 17, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.dao_get_inbound_sms_for_service", "line_number": 19, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.dao_get_inbound_sms_for_service", "line_number": 25, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 30, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 31, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 32, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 33, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 34, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 35, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.dao_get_inbound_sms_for_service", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "tests.app.db.create_service", "line_number": 46, "usage_type": "call"}, {"api_name": "tests.app.db.create_service", "line_number": 47, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 49, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 50, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.dao_get_inbound_sms_for_service", "line_number": 52, "usage_type": "call"}, {"api_name": "tests.app.db.create_service", "line_number": 58, "usage_type": "call"}, {"api_name": "tests.app.db.create_service", "line_number": 59, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 61, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 62, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 63, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.dao_count_inbound_sms_for_service", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 70, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 70, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 71, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.delete_inbound_sms_created_more_than_a_week_ago", "line_number": 72, "usage_type": "call"}, {"api_name": "app.models.InboundSms.query.all", "line_number": 74, "usage_type": "call"}, {"api_name": "app.models.InboundSms.query", "line_number": 74, "usage_type": "attribute"}, {"api_name": "app.models.InboundSms", "line_number": 74, "usage_type": "name"}, {"api_name": "freezegun.freeze_time", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 81, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 83, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 84, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 85, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.delete_inbound_sms_created_more_than_a_week_ago", "line_number": 87, "usage_type": "call"}, {"api_name": "app.models.InboundSms.query.all", "line_number": 89, "usage_type": "call"}, {"api_name": "app.models.InboundSms.query", "line_number": 89, "usage_type": "attribute"}, {"api_name": "app.models.InboundSms", "line_number": 89, "usage_type": "name"}, {"api_name": "freezegun.freeze_time", "line_number": 77, "usage_type": "call"}, {"api_name": "tests.app.db.create_inbound_sms", "line_number": 93, "usage_type": "call"}, {"api_name": "app.dao.inbound_sms_dao.dao_get_inbound_sms_by_id", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "78386416", "text": "from handlers.invoice_handler import add_invoice, all_invoices,\\\n remove_invoice, show_invoice, get_invoice_serial\nfrom utils.quit import quit_loop\n\n\n\ndef _get_input(text:str)->str:\n return input(text)\n\ndef _invoice_menu()->str:\n \"\"\"Display options for invoice mode\"\"\"\n invoice_menu = _get_input(\"\\t\\tChoose from the options:\\n\\t\\t\"\n \"(1). Display all invoices\\n\\t\\t\"\n \"(2). Display an invoice\\n\\t\\t\"\n \"(3). Delete an invoice\\n\\t\\t\"\n \"(q). 
Quit\\n")\r\n    return invoice_menu\r\n\r\ndef invoice_management_loop():\r\n    \"\"\"Invoice management for database\"\"\"\r\n    while True:\r\n        chosen_menu = _invoice_menu()\r\n        if quit_loop(chosen_menu):\r\n            break\r\n        elif chosen_menu == '1':\r\n            all_invoices()\r\n\r\n        elif chosen_menu == '2':\r\n            invoice_serial = input(\"\\t\\tSearch for invoice by serial: \\n\")\r\n            show_invoice(invoice_serial)\r\n        elif chosen_menu == '3':\r\n            invoice_serial = input(\"\\t\\tSearch for invoice by serial: \\n\")\r\n            remove_invoice(invoice_serial)\r\n        else:\r\n            print(\"You did not enter q, 1, 2 or 3\")\r\n", "sub_path": "invoice/interface/invoice_interface.py", "file_name": "invoice_interface.py", "file_ext": "py", "file_size_in_byte": 1225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "utils.quit.quit_loop", "line_number": 23, "usage_type": "call"}, {"api_name": "handlers.invoice_handler.all_invoices", "line_number": 26, "usage_type": "call"}, {"api_name": "handlers.invoice_handler.show_invoice", "line_number": 30, "usage_type": "call"}, {"api_name": "handlers.invoice_handler.remove_invoice", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "267944036", "text": "# Copyright (c) 2014 Matthew Rocklin\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# a. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# b. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# c. Neither the name of multipledispatch nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGE.\n#\n# --------------------------------------------------------------------------------------\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# This file has been modified by Megvii (\"Megvii Modifications\").\n# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. 
All rights reserved.\n# --------------------------------------------------------------------------------------\n\nimport sys\nimport typing\nfrom collections import OrderedDict\n\n\ndef raises(err, lamda):\n    try:\n        lamda()\n        return False\n    except err:\n        return True\n\n\ndef expand_tuples(L):\n    \"\"\"\n\n    >>> expand_tuples([1, (2, 3)])\n    [(1, 2), (1, 3)]\n\n    >>> expand_tuples([1, 2])\n    [(1, 2)]\n    \"\"\"\n    if not L:\n        return [()]\n    elif not isinstance(L[0], tuple):\n        rest = expand_tuples(L[1:])\n        return [(L[0],) + t for t in rest]\n    else:\n        rest = expand_tuples(L[1:])\n        return [(item,) + t for t in rest for item in L[0]]\n\n\n# Taken from theano/theano/gof/sched.py\n# Avoids licensing issues because this was written by Matthew Rocklin\ndef _toposort(edges):\n    \"\"\" Topological sort algorithm by Kahn [1] - O(nodes + vertices)\n\n    inputs:\n        edges - a dict of the form {a: {b, c}} where b and c depend on a\n    outputs:\n        L - an ordered list of nodes that satisfy the dependencies of edges\n\n    >>> _toposort({1: (2, 3), 2: (3, )})\n    [1, 2, 3]\n\n    Closely follows the wikipedia page [2]\n\n    [1] Kahn, Arthur B. (1962), \"Topological sorting of large networks\",\n    Communications of the ACM\n    [2] http://en.wikipedia.org/wiki/Toposort#Algorithms\n    \"\"\"\n    incoming_edges = reverse_dict(edges)\n    incoming_edges = OrderedDict((k, set(val)) for k, val in incoming_edges.items())\n    S = OrderedDict.fromkeys(v for v in edges if v not in incoming_edges)\n    L = []\n\n    while S:\n        n, _ = S.popitem()\n        L.append(n)\n        for m in edges.get(n, ()):\n            assert n in incoming_edges[m]\n            incoming_edges[m].remove(n)\n            if not incoming_edges[m]:\n                S[m] = None\n    if any(incoming_edges.get(v, None) for v in edges):\n        raise ValueError(\"Input has cycles\")\n    return L\n\n\ndef reverse_dict(d):\n    \"\"\"\n    Reverses direction of dependence dict\n\n    >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}\n    >>> reverse_dict(d)  # doctest: +SKIP\n    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}\n\n    :note: dict order is not deterministic. As we iterate over the\n        input dict, this makes the output of this function depend on the\n        dict order. So this function's output order should be considered\n        nondeterministic.\n\n    \"\"\"\n    result = OrderedDict()\n    for key in d:\n        for val in d[key]:\n            result[val] = result.get(val, tuple()) + (key,)\n    return result\n\n\n# Taken from toolz\n# Avoids licensing issues because this version was authored by Matthew Rocklin\ndef groupby(func, seq):\n    \"\"\" Group a collection by a key function\n\n    >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n    >>> groupby(len, names)  # doctest: +SKIP\n    {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n\n    >>> iseven = lambda x: x % 2 == 0\n    >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8])  # doctest: +SKIP\n    {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}\n\n    See Also:\n        ``countby``\n    \"\"\"\n\n    d = OrderedDict()\n    for item in seq:\n        key = func(item)\n        if key not in d:\n            d[key] = list()\n        d[key].append(item)\n    return d\n\n\ndef typename(type):\n    \"\"\"\n    Get the name of `type`.\n\n    Parameters\n    ----------\n    type : Union[Type, Tuple[Type]]\n\n    Returns\n    -------\n    str\n        The name of `type` or a tuple of the names of the types in `type`.\n\n    Examples\n    --------\n    >>> typename(int)\n    'int'\n    >>> typename((int, float))\n    '(int, float)'\n    \"\"\"\n    try:\n        return type.__name__\n    except AttributeError:\n        if len(type) == 1:\n            return typename(*type)\n        return \"(%s)\" % \", \".join(map(typename, type))\n\n\n# parse typing.Union\ndef parse_union(ann):\n    if hasattr(typing, \"UnionMeta\"):\n        if type(ann) is not typing.UnionMeta:\n            return\n        return ann.__union_params__\n    elif hasattr(typing, \"_Union\"):\n        if type(ann) is not typing._Union:\n            return\n        return ann.__args__\n    elif hasattr(typing, \"_GenericAlias\"):\n        if type(ann) is not typing._GenericAlias:\n            if type(ann) is not typing.Union:\n                return\n        else:\n            if ann.__origin__ is not typing.Union:\n                return\n        return ann.__args__\n    elif hasattr(typing, \"Union\"):\n        if typing.get_origin(ann) is not typing.Union:\n            return\n        return typing.get_args(ann)\n    else:\n        raise NotImplementedError(\"unsupported Python version\")\n", "sub_path": "imperative/python/megengine/core/tensor/multipledispatch/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 6683, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.OrderedDict", "line_number": 95, "usage_type": "call"}, {"api_name": "collections.OrderedDict.fromkeys", "line_number": 96, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 96, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 126, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 150, "usage_type": "call"}, {"api_name": "typing.UnionMeta", "line_number": 190, "usage_type": "attribute"}, {"api_name": "typing._Union", "line_number": 194, "usage_type": "attribute"}, {"api_name": "typing._GenericAlias", "line_number": 198, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 199, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 202, "usage_type": "attribute"}, {"api_name": "typing.get_origin", "line_number": 206, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 206, "usage_type": "attribute"}, {"api_name": "typing.get_args", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "433005435", "text": "# encoding: utf8\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        
('app', '0002_customer_email'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='customer',\n name='email',\n field=models.EmailField(max_length=20),\n ),\n ]\n", "sub_path": "app/migrations/0003_auto_20150223_1754.py", "file_name": "0003_auto_20150223_1754.py", "file_ext": "py", "file_size_in_byte": 389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "213263666", "text": "import json\nimport requests\nimport Modules.zapi.zapiheaders as headers\nimport datetime\nimport time\nimport Modules.data.db as db\nimport Modules.shield.FunctionalTests.functional_test_data as ftd\ndate = '2019-01-24'#datetime.datetime.today().strftime('%Y-%m-%d')\nprojectId = 13901\ncyclename= 'ATR'+date\nversion = -1\nBASE_URL = 'https://prod-api.zephyr4jiracloud.com/connect'\n# BASE_URL ='https://prod-play.zephyr4jiracloud.com/connect'\ncontentType1 ='application/json'\ncontentType2 ='text/plain'\npayload = {\n 'name': cyclename,\n \"build\": ftd.funtional_tests().get_build_number_for_date(date),\n 'environment': 'Staging',\n \"startDate\": date,\n \"endDate\": date,\n 'projectId': projectId,\n 'versionId': version\n}\n\n\n\n\nclass zapi():\n\n def is_json(self, data):\n try:\n json.loads(data)\n except ValueError:\n return False\n return True\n\n def getListOfCycles(self):\n cycleUrl = '/public/rest/api/1.0/cycles/search'\n # params = {\"projectId\": 13901, \"versionId\": -1, }\n params = {'expand': 'executionSummaries','projectId': projectId,'versionId': version,}\n queryString = 'expand=executionSummaries&projectId=13901&versionId=-1'\n header = headers.zapiheaders().getHeaders('GET', cycleUrl, contentType2, queryString=queryString)\n print (header)\n print(params)\n raw_result = requests.get(BASE_URL + cycleUrl, params=params, headers=header)\n print(raw_result.url)\n print(raw_result.status_code)\n if raw_result.status_code == 200:\n # JSON RESPONSE: convert response to JSON\n json_result = json.loads(raw_result.text)\n print(json.dumps(json_result, indent=4, sort_keys=True))\n return json_result\n else:\n print(raw_result.text)\n\n\n def createNewTestCycle(self):\n\n cycleUrl = '/public/rest/api/1.0/cycle'\n header = headers.zapiheaders().getHeaders('POST', cycleUrl, contentType1)\n print(payload)\n raw_result = requests.post(BASE_URL + cycleUrl, headers=header, json=payload)\n # print(raw_result.request)\n if raw_result.status_code == 200:\n\n # JSON RESPONSE: convert response to JSON\n json_result = json.loads(raw_result.text)\n\n # PRINT RESPONSE: pretty print with 4 indent\n print(json.dumps(json_result, indent=4, sort_keys=True))\n return json_result['id'], json_result['name']\n\n else:\n print(raw_result.text)\n\n def addtesttonewCycle(self, cycleId):\n path_add_tests = '/public/rest/api/1.0/executions/add/cycle/'\n cycleUrl = path_add_tests + cycleId\n header = headers.zapiheaders().getHeaders('POST', cycleUrl, contentType1)\n # MAKE REQUEST:\n jql = {\"jql\": \"project in (MFIN, MBR) AND issuetype = Test AND labels = 
Automated-CI ORDER BY created DESC\",\n \"assigneeType\": \"assignee\", \"assignee\": \"rchintapalli\", \"method\": 2, \"versionId\": version, \"projectId\": projectId}\n raw_result = requests.post(BASE_URL + path_add_tests + cycleId, headers=header, json=jql)\n if self.is_json(raw_result.text):\n\n # JSON RESPONSE: convert response to JSON\n json_result = json.loads(raw_result.text)\n\n # PRINT RESPONSE: pretty print with 4 indent\n # print(json.dumps(json_result, indent=4, sort_keys=True))\n\n else:\n print(raw_result.text)\n print(\"Waiting for the tests to be added to the cycle\")\n time.sleep(140)\n print(\"Waiting is finished. Getting the executions for status update\")\n\n def getExecutionsByCycleID(self, cycleId):\n RELATIVE_PATH_CYCLE = '/public/rest/api/1.0/executions/search/cycle/'\n exid = []\n offsets = ['0', '50','100','150', '200']\n for offset in offsets:\n params = {'offset': offset, \"projectId\": projectId, \"versionId\": version, }\n queryString = 'offset=' + offset + '&projectId=13901&versionId=-1'\n header = headers.zapiheaders().getHeaders('GET', RELATIVE_PATH_CYCLE + cycleId,contentType1, queryString=queryString)\n raw_result = requests.get(BASE_URL + RELATIVE_PATH_CYCLE + cycleId, params=params, headers=header)\n if raw_result.status_code == 200:\n # JSON RESPONSE: convert response to JSON\n json_result = json.loads(raw_result.text)\n executions = json_result['searchObjectList']\n ids = [[execution['execution']['id'], execution['execution']['issueId'], execution['issueKey']] for execution in executions]\n exid.append(ids)\n # print(ids)\n\n else:\n print(raw_result.text)\n exid_list = [item for sublist in exid for item in sublist]\n # print(len(exid_list))\n # print(exid_list)\n return exid_list\n\n def updateBulkStatus(self, cycleId):\n statusPayload = {\n 'executions': zapi().getExecutionsByCycleID(cycleId),\n 'status': 1,\n 'clearDefectMappingFlag': 'false',\n 'testStepStatusChangeFlag': 'true',\n 'stepStatus': 1\n }\n RELATIVE_PATH_EXECUTIONS = '/public/rest/api/1.0/executions'\n header = headers.zapiheaders().getHeaders(\"POST\", RELATIVE_PATH_EXECUTIONS, contentType1)\n raw_result = requests.post(BASE_URL + RELATIVE_PATH_EXECUTIONS, headers=header, json=statusPayload)\n if self.is_json(raw_result.text):\n # JSON RESPONSE: convert response to JSON\n json_result = json.loads(raw_result.text)\n\n # PRINT RESPONSE: pretty print with 4 indent\n # print(json.dumps(json_result, indent=4, sort_keys=True))\n\n else:\n print(raw_result.text)\n\n def updateExecution(self, cycleId, executionId, issueId, issueKey):\n\n status = self.getIssueStatus(issueKey)\n statusPayload ={\"status\":{\"id\":status},\n \"id\":executionId,\n \"projectId\":projectId,\n \"issueId\":issueId,\n \"cycleId\":cycleId,\n \"versionId\":version}\n RELATIVE_PATH_EXECUTION = '/public/rest/api/1.0/execution/'\n header = headers.zapiheaders().getHeaders(\"PUT\", RELATIVE_PATH_EXECUTION+executionId, contentType1)\n raw_result = requests.put(BASE_URL + RELATIVE_PATH_EXECUTION+executionId, headers=header, json=statusPayload)\n if self.is_json(raw_result.text):\n # JSON RESPONSE: convert response to JSON\n json_result = json.loads(raw_result.text)\n\n # PRINT RESPONSE: pretty print with 4 indent\n print(json.dumps(json_result, indent=4, sort_keys=True))\n\n else:\n print(raw_result.text)\n\n def getIssueStatus(self, issueKey):\n query = db.db().get_query_string_for('issueresult')\n res = db.db().execute_query(query, ('{}%'.format(date),))\n # res = db.db().get_isuue_result_for_date(date)\n status = 
[x[1] for x in res if x[0] == issueKey]\n        if len(status) >= 1:\n            if status[0].lower() == 'success':\n                return 1\n            elif status[0].lower() == 'failure':\n                return 2\n            else:\n                return 0\n        else:\n            print('issue not found in CI test run')\n\n    def createCycleAndUpdateResults(self):\n        cycle = zapi().createNewTestCycle()\n        query = db.db().get_query_string_for('issueresult')\n        res = db.db().execute_query(query, ('{}%'.format(date),))\n        # db.db().get_issue_result_for_date(date)\n        timeout = time.time() + 60 * 4\n        if cycle is not None:\n            cycleId = cycle[0]\n            cycleName = cycle[1]\n            zapi().addtesttonewCycle(cycleId)\n            executions = zapi().getExecutionsByCycleID(cycleId)\n            # while not len(executions) == len(res) or time.time() > timeout:\n\n            print(len(executions))\n            # for execution in executions:\n            #     print(execution[2])\n            for execution in executions:\n                zapi().updateExecution(cycleId, execution[0], execution[1], execution[2])\n            return 'Results posted to Jira with cycle name: ' + cycleName\n        else:\n            return \"Test cycle not created\"\n\n    def gettestcases(self):\n        cycleUrl = '/public/rest/api/1.0/chart/tests/created'\n        # params = {\"projectId\": 13901, \"versionId\": -1, }\n        params = {'daysPrevious': '365', 'periodName': 'yearly', 'projectId': 13901}\n        queryString = 'daysPrevious=365&periodName=yearly&projectId=13901'\n        header = headers.zapiheaders().getHeaders('GET', cycleUrl, contentType2, queryString=queryString)\n        print(header)\n        print(params)\n        raw_result = requests.get(BASE_URL + cycleUrl, params=params, headers=header)\n        print(raw_result.url)\n        print(raw_result.status_code)\n        if raw_result.status_code == 200:\n            # JSON RESPONSE: convert response to JSON\n            json_result = json.loads(raw_result.text)\n            print(json.dumps(json_result, indent=4, sort_keys=True))\n            # return json_result\n        else:\n            print(raw_result.text)\n
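\n\n# (editor's note) createCycleAndUpdateResults computes `timeout` but never\n# uses it, relying instead on the fixed time.sleep(140) in addtesttonewCycle.\n# A hedged sketch of the polling loop the commented-out `while` above seems to\n# have intended (the 5-second interval is an assumption):\n#\n#     timeout = time.time() + 60 * 4\n#     executions = zapi().getExecutionsByCycleID(cycleId)\n#     while len(executions) < len(res) and time.time() < timeout:\n#         time.sleep(5)  # re-poll until every added test appears, or give up\n#         executions = zapi().getExecutionsByCycleID(cycleId)\n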
"time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders.zapiheaders", "line_number": 106, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders", "line_number": 106, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 107, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 110, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders.zapiheaders", "line_number": 132, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders", "line_number": 132, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 133, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 136, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders.zapiheaders", "line_number": 154, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders", "line_number": 154, "usage_type": "name"}, {"api_name": "requests.put", "line_number": 155, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 158, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 161, "usage_type": "call"}, {"api_name": "Modules.data.db.db", "line_number": 167, "usage_type": "call"}, {"api_name": "Modules.data.db", "line_number": 167, "usage_type": "name"}, {"api_name": "Modules.data.db.db", "line_number": 168, "usage_type": "call"}, {"api_name": "Modules.data.db", "line_number": 168, "usage_type": "name"}, {"api_name": "Modules.data.db.db", "line_number": 184, "usage_type": "call"}, {"api_name": "Modules.data.db", "line_number": 184, "usage_type": "name"}, {"api_name": "Modules.data.db.db", "line_number": 185, "usage_type": "call"}, {"api_name": "Modules.data.db", "line_number": 185, "usage_type": "name"}, {"api_name": "time.time", "line_number": 187, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders.zapiheaders", "line_number": 212, "usage_type": "call"}, {"api_name": "Modules.zapi.zapiheaders", "line_number": 212, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 215, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 220, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "143780822", "text": "from BioSystem import BioSystem, Part,Rate\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nsys2 = BioSystem()\nx = sys2.add_compositor('x', 1)\ny = sys2.add_compositor('y', 0.5)\n\nsys2.add_constant('u', 5)\n\nsys2.add_part(Part('Van der Pol wibbly-wobbly-timey-wimey (oscillator)', [x, y],\n [ Rate('y'), Rate('u*(1-x**2)*y-x')]))\n\ntime = np.linspace(0, 200, 200000)\ncolors = ['b', 'g', 'r']\nplt.figure(1)\nplt.subplot(211)\n\nind = 0\n\nfor mu in [0, 5, 25]:\n sys2.change_constant_value('u', mu)\n y = sys2.run_ode_int(time)\n plt.plot(time, y[:, sys2.compositor_idex('y')], colors[ind])\n ind += 1\nplt.legend(['u = 0', 'u = 5', 'u = 25'])\nplt.xlabel('Laikas')\nplt.ylabel('y')\nplt.title('Van der Pol osciliatorius skirtingiems parametrams u')\n\nplt.subplot(212)\nind = 0\n\nfor mu in [0, 10, 50]:\n sys2.change_constant_value('u', mu)\n y = sys2.run_ode_int(time)\n plt.plot(time, y[:, sys2.compositor_idex('x')], colors[ind])\n ind += 1\nplt.legend(['u = 0', 'u = 5', 'u = 25'])\nplt.xlabel('Laikas')\nplt.ylabel('x')\n\"\"\"\nplt.subplot(111)\nind = 0\n\nfor mu in [0, 5, 25]:\n sys2.change_constant_value('u', mu)\n y = sys2.run_ode_int(time)\n plt.plot(y[:, sys2.compositor_idex('x')], y[:, sys2.compositor_idex('y')], colors[ind])\n ind += 1\nplt.legend(['u = 0', 'u = 5', 'u = 
25'])\nplt.xlabel('x')\nplt.ylabel('y')\n\"\"\"\nplt.show()\n", "sub_path": "bin/Linux/Van der Pol/Van_der_Pol.py", "file_name": "Van_der_Pol.py", "file_ext": "py", "file_size_in_byte": 1344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "BioSystem.BioSystem", "line_number": 7, "usage_type": "call"}, {"api_name": "BioSystem.Part", "line_number": 13, "usage_type": "call"}, {"api_name": "BioSystem.Rate", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "280802973", "text": "from flask import Flask, request, render_template\nimport inlineflair\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n return render_template('form.html')\n\n\n@app.route('/code', methods=['POST'])\ndef convert():\n if request.form['mode'] == \"normal\":\n output = inlineflair.flairbot(request.form['comment'])\n elif request.form['mode'] == \"safe\":\n output = inlineflair.flairbot_safe(request.form['comment'])\n return render_template('code.html', output=output)\n\nif __name__ == '__main__':\n app.run()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 527, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, 
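# (editor's note, re: Van_der_Pol.py above) For large u the Van der Pol system\n# becomes stiff, and the dense fixed grid fed to run_ode_int gets expensive. A\n# hedged, BioSystem-independent sketch of the same system on a stiff solver:\n#\n#     from scipy.integrate import solve_ivp\n#\n#     def vdp(t, s, u):\n#         x, y = s\n#         return [y, u * (1 - x**2) * y - x]\n#\n#     sol = solve_ivp(vdp, (0, 200), [1, 0.5], args=(25,), method='LSODA', dense_output=True)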
"usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "inlineflair.flairbot", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "inlineflair.flairbot_safe", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "630661253", "text": "# Copyright 2019 Geoffrey A. Reed. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n# ----------------------------------------------------------------------\nimport json\n\nfrom tcia import _types\nfrom tcia import _utils\n\n\n__all__ = [\n \"CollectionsResource\",\n \"ModalitiesResource\",\n \"BodyPartsResource\",\n \"ManufacturersResource\",\n \"PatientsResource\",\n \"PatientsByModalityResource\",\n \"PatientStudiesResource\",\n \"ImagesResource\",\n \"NewPatientsInCollectionResource\",\n \"NewStudiesInPatientCollectionResource\",\n \"SOPInstanceUIDsResource\",\n \"SingleImageResource\",\n \"ContentsByNameResource\",\n]\n\n\nclass _Resource:\n\n _required_params = []\n\n def __init__(self, api_key, base_url, *, resource, endpoint):\n self._api_key = api_key\n self._base_url = base_url\n self._resource = resource\n self._endpoint = endpoint\n self._headers = {\"api_key\": api_key}\n self._url = f\"{base_url}/{resource}/query/{endpoint}\"\n self._params = {}\n self._metadata = None\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}('{self._api_key}', '{self._base_url}', \"\n f\"resource='{self._resource}'', endpoint='{self._endpoint}')\"\n )\n\n def __call__(self):\n return self\n\n @classmethod\n def _check_required_params(cls, params):\n for param in cls._required_params:\n if params.get(param) is None:\n raise TypeError(\n f\"required param '{param}' must be set using \"\n f\"{cls.__name__}().__call__() prior to call to \"\n f\"{cls.__name__}().get() or {cls.__name__}().download()\"\n )\n\n @property\n def metadata(self):\n if self._metadata is None:\n url = f\"{self._url}/metadata\"\n text = _utils.get_text(url, headers=self._headers)\n data = json.loads(text)\n metadata = _types.Metadata(\n query_name=data[\"QueryName\"],\n description=data[\"Description\"],\n parameters=[param for param in data[\"Parameters\"]],\n result=_types.Result(\n name=data[\"Result\"][\"Name\"],\n description=data[\"Result\"][\"Description\"],\n attributes=[\n _types.Attribute(\n name=attr[\"Name\"],\n description=attr[\"Description\"],\n dicom=attr[\"DICOM\"],\n )\n for 
attr in data[\"Result\"][\"Attributes\"]\n ],\n ),\n )\n self._metadata = metadata\n return self._metadata\n\n\nclass _TextResource(_Resource):\n\n _formats = [\"csv\", \"html\", \"xml\", \"json\"]\n _required_params = []\n\n @classmethod\n def _check_format(cls, format_):\n if not format_ in cls._formats:\n raise TypeError(\n f\"invalid format_ '{format_}': try one of {cls._formats}\"\n )\n\n def get(self):\n self.__class__._check_required_params(self._params)\n self._params.update({\"format\": \"json\"})\n text = _utils.get_text(\n self._url, headers=self._headers, params=self._params\n )\n data = json.loads(text)\n return data\n\n def download(\n self, path_or_buffer, format_=\"csv\", *, mode=\"wt\", encoding=\"utf-8\"\n ):\n self.__class__._check_required_params(self._params)\n self.__class__._check_format(format_)\n self._params.update({\"format\": format_})\n text = _utils.get_text(\n self._url, headers=self._headers, params=self._params\n )\n _utils.write_text(text, path_or_buffer, mode=mode, encoding=encoding)\n\n\nclass _BytesResource(_Resource):\n\n _required_params = []\n\n def download(self, path_or_buffer, chunk_size=1024, *, mode=\"wb\"):\n self.__class__._check_required_params(self._params)\n content_iter = _utils.get_content_iter(\n self._url,\n headers=self._headers,\n params=self._params,\n chunk_size=chunk_size,\n )\n _utils.write_streaming_content(content_iter, path_or_buffer, mode=mode)\n\n\nclass CollectionsResource(_TextResource):\n def __init__(\n self,\n api_key,\n base_url,\n *,\n resource=\"TCIA\",\n endpoint=\"getCollectionValues\",\n ):\n super().__init__(\n api_key, base_url, resource=resource, endpoint=endpoint\n )\n\n def get(self):\n data = super().get()\n collections = [\n _types.Collection(collection=element.get(\"Collection\"))\n for element in data\n ]\n return collections\n\n\nclass ModalitiesResource(_TextResource):\n def __init__(\n self,\n api_key,\n base_url,\n *,\n resource=\"TCIA\",\n endpoint=\"getModalityValues\",\n ):\n super().__init__(\n api_key, base_url, resource=resource, endpoint=endpoint\n )\n\n def __call__(self, *, collection=None, body_part_examined=None):\n self._params.update(\n {\"Collection\": collection, \"BodyPartExamined\": body_part_examined}\n )\n return self\n\n def get(self):\n data = super().get()\n modalities = [\n _types.Modality(modality=element.get(\"Modality\"))\n for element in data\n ]\n return modalities\n\n\nclass BodyPartsExaminedResource(_TextResource):\n def __init__(\n self,\n api_key,\n base_url,\n *,\n resource=\"TCIA\",\n endpoint=\"getBodyPartValues\",\n ):\n super().__init__(\n api_key, base_url, resource=resource, endpoint=endpoint\n )\n\n def __call__(self, *, collection=None, modality=None):\n self._params.update({\"Collection\": collection, \"Modality\": modality})\n return self\n\n def get(self):\n data = super().get()\n body_parts_examined = [\n _types.BodyPartExamined(\n body_part_examined=element.get(\"BodyPartExamined\")\n )\n for element in data\n ]\n return body_parts_examined\n\n\nclass ManufacturersResource(_TextResource):\n def __init__(\n self,\n api_key,\n base_url,\n *,\n resource=\"TCIA\",\n endpoint=\"getManufacturerValues\",\n ):\n super().__init__(\n api_key, base_url, resource=resource, endpoint=endpoint\n )\n\n def __call__(\n self, *, collection=None, modality=None, body_part_examined=None\n ):\n self._params.update(\n {\n \"Collection\": collection,\n \"Modality\": modality,\n \"BodyPartExamined\": body_part_examined,\n }\n )\n return self\n\n def get(self):\n data = 
super().get()\n        manufacturers = [\n            _types.Manufacturer(manufacturer=element.get(\"Manufacturer\"))\n            for element in data\n        ]\n        return manufacturers\n\n\nclass PatientsResource(_TextResource):\n    def __init__(\n        self, api_key, base_url, *, resource=\"TCIA\", endpoint=\"getPatient\"\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(self, *, collection=None):\n        self._params.update({\"Collection\": collection})\n        return self\n\n    def get(self):\n        data = super().get()\n        patients = [\n            _types.Patient(\n                patient_id=element.get(\"PatientID\"),\n                patient_name=element.get(\"PatientName\"),\n                patient_sex=element.get(\"PatientSex\"),\n                collection=element.get(\"Collection\"),\n            )\n            for element in data\n        ]\n        return patients\n\n\nclass PatientsByModalityResource(_TextResource):\n\n    _required_params = [\"Collection\", \"Modality\"]\n\n    def __init__(\n        self,\n        api_key,\n        base_url,\n        *,\n        resource=\"TCIA\",\n        endpoint=\"PatientsByModality\",\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(self, *, collection, modality):\n        self._params.update({\"Collection\": collection, \"Modality\": modality})\n        return self\n\n    def get(self):\n        data = super().get()\n        patients_by_modality = [\n            _types.PatientByModality(\n                patient_id=element.get(\"PatientID\"),\n                patient_name=element.get(\"PatientName\"),\n                patient_sex=element.get(\"PatientSex\"),\n                collection=element.get(\"Collection\"),\n            )\n            for element in data\n        ]\n        return patients_by_modality\n\n\nclass PatientStudiesResource(_TextResource):\n    def __init__(\n        self, api_key, base_url, *, resource=\"TCIA\", endpoint=\"getPatientStudy\"\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(\n        self, *, collection=None, patient_id=None, study_instance_uid=None\n    ):\n        self._params.update(\n            {\n                \"Collection\": collection,\n                \"PatientID\": patient_id,\n                \"StudyInstanceUID\": study_instance_uid,\n            }\n        )\n        return self\n\n    def get(self):\n        data = super().get()\n        patient_studies = [\n            _types.PatientStudy(\n                study_instance_uid=element.get(\"StudyInstanceUID\"),\n                study_date=element.get(\"StudyDate\"),\n                study_description=element.get(\"StudyDescription\"),\n                patient_age=element.get(\"PatientAge\"),\n                patient_id=element.get(\"PatientID\"),\n                patient_name=element.get(\"PatientName\"),\n                patient_sex=element.get(\"PatientSex\"),\n                collection=element.get(\"Collection\"),\n                series_count=element.get(\"SeriesCount\"),\n            )\n            for element in data\n        ]\n        return patient_studies\n\n\nclass SeriesResource(_TextResource):\n    def __init__(\n        self, api_key, base_url, *, resource=\"TCIA\", endpoint=\"getSeries\"\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(\n        self,\n        *,\n        collection=None,\n        study_instance_uid=None,\n        patient_id=None,\n        series_instance_uid=None,\n        modality=None,\n        manufacturer_model_name=None,\n        manufacturer=None,\n    ):\n        self._params.update(\n            {\n                \"Collection\": collection,\n                \"StudyInstanceUID\": study_instance_uid,\n                \"PatientID\": patient_id,\n                \"SeriesInstanceUID\": series_instance_uid,\n                \"Modality\": modality,\n                \"ManufacturerModelName\": manufacturer_model_name,\n                \"Manufacturer\": manufacturer,\n            }\n        )\n        return self\n\n    def get(self):\n        data = super().get()\n        series_list = [\n            _types.Series(\n                series_instance_uid=element.get(\"SeriesInstanceUID\"),\n                study_instance_uid=element.get(\"StudyInstanceUID\"),\n                modality=element.get(\"Modality\"),\n                
protocol_name=element.get(\"ProtocolName\"),\n                series_date=element.get(\"SeriesDate\"),\n                series_description=element.get(\"SeriesDescription\"),\n                body_part_examined=element.get(\"BodyPartExamined\"),\n                series_number=element.get(\"SeriesNumber\"),\n                annotations_flag=element.get(\"AnnotationsFlag\"),\n                collection=element.get(\"Collection\"),\n                patient_id=element.get(\"PatientID\"),\n                manufacturer=element.get(\"Manufacturer\"),\n                manufacturer_model_name=element.get(\"ManufacturerModelName\"),\n                software_version=element.get(\"SoftwareVersion\"),\n                image_count=element.get(\"ImageCount\"),\n            )\n            for element in data\n        ]\n        return series_list\n\n\nclass SeriesSizeResource(_TextResource):\n\n    _required_params = [\"SeriesInstanceUID\"]\n\n    def __init__(\n        self, api_key, base_url, *, resource=\"TCIA\", endpoint=\"getSeriesSize\"\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(self, *, series_instance_uid):\n        self._params.update({\"SeriesInstanceUID\": series_instance_uid})\n        return self\n\n    def get(self):\n        data = super().get()\n        series_sizes = [\n            _types.SeriesSize(\n                total_size_in_bytes=element.get(\"TotalSizeInBytes\"),\n                object_count=element.get(\"ObjectCount\"),\n            )\n            for element in data\n        ]\n        return series_sizes\n\n\nclass ImagesResource(_BytesResource):\n\n    _required_params = [\"SeriesInstanceUID\"]\n\n    def __init__(\n        self, api_key, base_url, *, resource=\"TCIA\", endpoint=\"getImage\"\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(self, *, series_instance_uid):\n        self._params.update({\"SeriesInstanceUID\": series_instance_uid})\n        return self\n\n\nclass NewPatientsInCollectionResource(_TextResource):\n\n    _required_params = [\"Date\", \"Collection\"]\n\n    def __init__(\n        self,\n        api_key,\n        base_url,\n        *,\n        resource=\"TCIA\",\n        endpoint=\"NewPatientsInCollection\",\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(self, *, date, collection):\n        self._params.update({\"Date\": date, \"Collection\": collection})\n        self._configured = True\n        return self\n\n    def get(self):\n        data = super().get()\n        new_patients_in_collection = [\n            _types.NewPatientInCollection(\n                patient_id=element.get(\"PatientID\"),\n                collection=element.get(\"Collection\"),\n            )\n            for element in data\n        ]\n        return new_patients_in_collection\n\n\nclass NewStudiesInPatientCollectionResource(_TextResource):\n\n    _required_params = [\"Date\", \"Collection\"]\n\n    def __init__(\n        self,\n        api_key,\n        base_url,\n        *,\n        resource=\"TCIA\",\n        endpoint=\"NewStudiesInPatientCollection\",\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(self, *, date, collection, patient_id=None):\n        self._params.update(\n            {\"Date\": date, \"Collection\": collection, \"PatientID\": patient_id}\n        )\n        return self\n\n    def get(self):\n        data = super().get()\n        new_studies_in_patient_collection = [\n            _types.NewStudyInPatientCollection(\n                patient_id=element.get(\"PatientID\"),\n                collection=element.get(\"Collection\"),\n                study_instance_uid=element.get(\"StudyInstanceUID\"),\n            )\n            for element in data\n        ]\n        return new_studies_in_patient_collection\n\n\nclass SOPInstanceUIDsResource(_TextResource):\n\n    _required_params = [\"SeriesInstanceUID\"]\n\n    def __init__(\n        self,\n        api_key,\n        base_url,\n        *,\n        resource=\"TCIA\",\n        endpoint=\"getSOPInstanceUIDs\",\n    ):\n        super().__init__(\n            api_key, base_url, resource=resource, endpoint=endpoint\n        )\n\n    def __call__(self, *, 
series_instance_uid):\n self._params.update({\"SeriesInstanceUID\": series_instance_uid})\n return self\n\n def get(self):\n data = super().get()\n sop_instance_uids = [\n _types.SOPInstanceUID(\n # API documentation inconsistent: \"sop_instance_uid\" not\n # \"SOPInstanceUID\". Reason unknown.\n sop_instance_uid=element.get(\"sop_instance_uid\")\n )\n for element in data\n ]\n return sop_instance_uids\n\n\nclass SingleImageResource(_BytesResource):\n\n _required_params = [\"SeriesInstanceUID\", \"SOPInstanceUID\"]\n\n def __init__(\n self, api_key, base_url, *, resource=\"TCIA\", endpoint=\"getSingleImage\"\n ):\n super().__init__(\n api_key, base_url, resource=resource, endpoint=endpoint\n )\n\n def __call__(self, *, series_instance_uid, sop_instance_uid):\n self._params.update(\n {\n \"SeriesInstanceUID\": series_instance_uid,\n \"SOPInstanceUID\": sop_instance_uid,\n }\n )\n return self\n\n\nclass ContentsByNameResource(_TextResource):\n\n _required_params = [\"name\"]\n\n def __init__(\n self,\n api_key,\n base_url,\n *,\n resource=\"SharedList\",\n endpoint=\"ContentsByName\",\n ):\n super().__init__(\n api_key, base_url, resource=resource, endpoint=endpoint\n )\n\n def __call__(self, *, name):\n self._params.update({\"name\": name})\n return self\n\n def get(self):\n data = super().get()\n return data\n", "sub_path": "src/tcia/_resources.py", "file_name": "_resources.py", "file_ext": "py", "file_size_in_byte": 17571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tcia._utils.get_text", "line_number": 75, "usage_type": "call"}, {"api_name": "tcia._utils", "line_number": 75, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "tcia._types.Metadata", "line_number": 77, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 77, "usage_type": "name"}, {"api_name": "tcia._types.Result", "line_number": 81, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 81, "usage_type": "name"}, {"api_name": "tcia._types.Attribute", "line_number": 85, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 85, "usage_type": "name"}, {"api_name": "tcia._utils.get_text", "line_number": 113, "usage_type": "call"}, {"api_name": "tcia._utils", "line_number": 113, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 116, "usage_type": "call"}, {"api_name": "tcia._utils.get_text", "line_number": 125, "usage_type": "call"}, {"api_name": "tcia._utils", "line_number": 125, "usage_type": "name"}, {"api_name": "tcia._utils.write_text", "line_number": 128, "usage_type": "call"}, {"api_name": "tcia._utils", "line_number": 128, "usage_type": "name"}, {"api_name": "tcia._utils.get_content_iter", "line_number": 137, "usage_type": "call"}, {"api_name": "tcia._utils", "line_number": 137, "usage_type": "name"}, {"api_name": "tcia._utils.write_streaming_content", "line_number": 143, "usage_type": "call"}, {"api_name": "tcia._utils", "line_number": 143, "usage_type": "name"}, {"api_name": "tcia._types.Collection", "line_number": 162, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 162, "usage_type": "name"}, {"api_name": "tcia._types.Modality", "line_number": 190, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 190, "usage_type": "name"}, {"api_name": "tcia._types.BodyPartExamined", "line_number": 216, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 216, "usage_type": "name"}, {"api_name": 
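# (editor's note, re: _resources.py above) A hedged usage sketch of the\n# resource classes; the key and base URL are placeholders, and 'TCGA-GBM' is\n# only an example collection name:\n#\n#     base = 'https://services.cancerimagingarchive.net'\n#     collections = CollectionsResource('YOUR-API-KEY', base).get()\n#     series = SeriesResource('YOUR-API-KEY', base)(collection='TCGA-GBM').get()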
"tcia._types.Manufacturer", "line_number": 252, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 252, "usage_type": "name"}, {"api_name": "tcia._types.Patient", "line_number": 273, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 273, "usage_type": "name"}, {"api_name": "tcia._types.PatientByModality", "line_number": 307, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 307, "usage_type": "name"}, {"api_name": "tcia._types.PatientStudy", "line_number": 341, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 341, "usage_type": "name"}, {"api_name": "tcia._types.Series", "line_number": 392, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 392, "usage_type": "name"}, {"api_name": "tcia._types.SeriesSize", "line_number": 432, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 432, "usage_type": "name"}, {"api_name": "tcia._types.NewPatientInCollection", "line_number": 481, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 481, "usage_type": "name"}, {"api_name": "tcia._types.NewStudyInPatientCollection", "line_number": 515, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 515, "usage_type": "name"}, {"api_name": "tcia._types.SOPInstanceUID", "line_number": 548, "usage_type": "call"}, {"api_name": "tcia._types", "line_number": 548, "usage_type": "name"}]} +{"seq_id": "492881436", "text": "import datetime\nimport glob\nimport multiprocessing\nimport os\nimport shutil\nfrom concurrent.futures import ThreadPoolExecutor\nfrom os import listdir\nfrom os.path import isfile, join\n\n# from ekstep_data_pipelines.common.utils import get_logger\nfrom google.cloud import storage\n\n# Logger = get_logger(\"GCS Operations\")\n\n\nclass CloudStorageOperations:\n @staticmethod\n def get_instance(config_dict, **kwargs):\n gcs_instance = CloudStorageOperations(config_dict, **kwargs)\n return gcs_instance\n\n def __init__(self, config_dict, **kwargs):\n self.config_dict = config_dict\n self._bucket = None\n self._client = None\n\n @property\n def client(self):\n if self._client:\n return self._client\n\n self._client = storage.Client()\n return self._client\n\n @property\n def bucket(self):\n if self._bucket:\n return self._bucket\n\n # if not self.config_dict:\n # self.setup_peripherals()\n\n self._bucket = (\n self.config_dict.get(\"common\", {})\n .get(\"gcs_config\", {})\n .get(\"master_bucket\")\n )\n return self._bucket\n\n def check_path_exists(self, path):\n bucket = self.client.bucket(self.bucket)\n stats = storage.Blob(bucket=bucket, name=path).exists(self.client)\n return stats\n\n def copy_all_files(self, src, dest, audio_extn):\n src_files = glob.glob(src + \"/*.\" + audio_extn)\n print(\"*******src_files***\", src, src_files, audio_extn)\n for file_name in src_files:\n meta_file_name = (\n \"/\".join(file_name.split(\"/\")[:-1])\n + \"/\"\n + file_name.split(\"/\")[-1].split(\".\")[0]\n + \".csv\"\n )\n full_meta_file_name = os.path.join(src, meta_file_name)\n full_file_name = os.path.join(src, file_name)\n print(\"*******full_meta_file_name****\", full_meta_file_name)\n print(\"*******full_file_name****\", full_file_name)\n if os.path.isfile(full_file_name) and os.path.isfile(full_meta_file_name):\n destination = dest + \"/\" + self.get_audio_id()\n self.make_directories(destination)\n print(\"****dest***\", destination)\n shutil.copy(full_file_name, destination)\n shutil.copy(full_meta_file_name, destination)\n\n def get_audio_id(self):\n return 
datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\")[:-2]\n\n def make_directories(self, path):\n if not os.path.exists(path):\n os.makedirs(path)\n print(\"Directory {} created successfully\".format(path))\n else:\n print(\"Directory {} already exists\".format(path))\n\n def download_to_local(\n self, source_blob_name, destination, is_directory, exclude_extn=None\n ):\n \"\"\"Downloads a blob from the bucket.\"\"\"\n # Provides options to download a file OR folder\n # Option 1: FILE mode: Download a file - copies a file with same name in destination folder\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\" e.g. \"data/raw/curation/\"\n # \"tobeprocessed/hindi/f10.txt\"\n # destination = \"local/path/to/folder\" e.g. \"data/raw/curation/tobeprocessed/hindi/f10.txt\"\n # isDirectory = flag to specify whether source is Directory OR File\n\n # Option 2: DIRECTORY mode: Download all files inside a folder - creates destination\n # local dir if not exists and copies all files from source\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\" e.g. \"data/raw/curation/tobeprocessed/hindi\"\n # destination = \"local/path/to/folder\" e.g. \"data/raw/curation/tobeprocessed/hindi\"\n # isDirectory = flag to specify whether source is Directory OR File\n\n print(\"Creating storage client object\")\n storage_client = storage.Client()\n if is_directory:\n # Create destination directories if not exists\n print(\"Running in DIRECTORY mode...\")\n print(\"Creating destination directories if not exists\")\n\n self.make_directories(destination)\n print(\n \"Fetching all blobs list from Bucket: {} and Source: {}\".format(\n self.bucket, source_blob_name\n )\n )\n\n blobs = list(\n storage_client.list_blobs(self.bucket, prefix=source_blob_name)\n )\n print(\"Fetched all blobs list successfully\")\n print(\n \"Will exclude {} extension file while copying to local destination\".format(\n exclude_extn\n )\n )\n\n for blob in blobs:\n if (not blob.name.endswith(\"/\")) & (\n not blob.name[blob.name.rfind(\"/\") + 1 : len(blob.name)].split(\".\")[\n 1\n ]\n == exclude_extn\n ):\n print(\n \"Downloading blob {}/{} to local directory: {}: \".format(\n self.bucket, blob.name, destination\n )\n )\n blob.download_to_filename(\n destination + \"/\" + blob.name.split(\"/\")[-1]\n )\n print(\"Blob downloaded successfully: {}\".format(blob.name))\n else:\n print(\"Running in FILE mode...\")\n\n # Get the Destination directory from input\n destination_directory = destination[0 : destination.rfind(\"/\")]\n print(\n \"Destination directory to be used for file download: {}\".format(\n destination_directory\n )\n )\n print(\"Creating destination directories if not exists\")\n self.make_directories(destination_directory)\n\n bucket = storage_client.bucket(self.bucket)\n src_blob = bucket.blob(source_blob_name)\n\n # Download the file\n print(\n \"Downloading file {} to destination: {}\".format(\n source_blob_name, destination_directory\n )\n )\n src_blob.download_to_filename(destination)\n print(\n \"File {}/{} downloaded to destination directory {} successfully\".format(\n self.bucket, source_blob_name, destination_directory\n )\n )\n\n def upload_to_gcs(\n self, local_source_path, destination_blob_name, upload_directory=True\n ):\n \"\"\"\n Uploads a blob from the local.\n\n :param string local_source_path: Local path to the file/directory being uploaded.\n Must include the file name incase of file upload\n\n :param string destination_blob_name: Remote path where 
the file/directory needs\n to be uploaded to\n\n :param bool upload_directory: Flag for specifying if the function is being used to\n upload a file or a directory. Pass False in case of a file\n\n \"\"\"\n\n bucket = self.client.bucket(self.bucket)\n\n if not upload_directory:\n Logger.info(\n \"Uploading file from source: %s to destination: \" \"%s/%s\",\n local_source_path,\n self.bucket,\n destination_blob_name,\n )\n blob = bucket.blob(destination_blob_name)\n try:\n blob.upload_from_filename(local_source_path)\n # W0703: Catching too general exception Exception (broad-except)\n except Exception as exception:\n Logger.info(\n \"Single file Upload failed with error %s\", exception.__str__()\n )\n return False\n\n Logger.info(\n \"Single File uploaded successfully to %s/%s\",\n self.bucket,\n destination_blob_name,\n )\n return True\n\n files = [\n f for f in listdir(local_source_path) if isfile(join(local_source_path, f))\n ]\n Logger.info(\"All the files in directory %s\", files)\n # TODO: move to constant and pass concurrency as args\n estimated_cpu_share = 0.05\n concurrency = int(multiprocessing.cpu_count() / estimated_cpu_share)  # max_workers must be an int\n executor = ThreadPoolExecutor(max_workers=concurrency)\n\n futures = []\n\n for file in files:\n src_file = local_source_path + \"/\" + file\n blob = bucket.blob(destination_blob_name + \"/\" + file)\n Logger.info(\n \"Uploading files from source: %s to destination: %s/%s \",\n src_file,\n self.bucket,\n blob.name,\n )\n futures.append(executor.submit(blob.upload_from_filename, src_file))\n\n executor.shutdown(wait=True)\n\n Logger.info(\"Checking the result of all upload values\")\n\n for upload_future in futures:\n try:\n upload_future.result()\n except Exception as exception:\n Logger.error(\n \"Uploading directory %s failed with error %s\",\n local_source_path,\n exception.__str__(),\n )\n return False\n\n Logger.info(\n \"All the files in directory %s uploaded successfully\", local_source_path\n )\n return True\n\n def list_blobs(self, bucket_name, prefix, delimiter=None):\n \"\"\"Lists all the blobs in the bucket.\"\"\"\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(\n bucket_name, prefix=prefix, delimiter=delimiter\n )\n\n for blob in blobs:\n print(blob.name)\n if delimiter:\n print(\"Prefixes:\")\n for prefix_ in blobs.prefixes:\n print(prefix_)\n\n def rename_blob(self, bucket_name, blob_name, new_name):\n \"\"\"Renames a blob.\"\"\"\n # bucket_name = \"your-bucket-name\"\n # blob_name = \"your-object-name\"\n # new_name = \"new-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(blob_name)\n\n new_blob = bucket.rename_blob(blob, new_name)\n\n print(\n \"Blob {}/{} has been renamed to {}\".format(\n bucket_name, blob.name, new_blob.name\n )\n )\n\n def copy_blob(self, blob_name, destination_blob_name, destination_bucket_name=None):\n \"\"\"Copies a blob from one bucket to another with a new name.\"\"\"\n # bucket_name = \"your-bucket-name\"\n # blob_name = \"your-object-name\"\n # destination_bucket_name = \"destination-bucket-name\"\n # destination_blob_name = \"destination-object-name\"\n if not destination_bucket_name:\n destination_bucket_name = self.bucket\n\n storage_client = storage.Client()\n\n source_bucket = storage_client.bucket(self.bucket)\n source_blob = source_bucket.blob(blob_name)\n destination_bucket = 
storage_client.bucket(destination_bucket_name)\n\n blob_copy = source_bucket.copy_blob(\n source_blob, destination_bucket, destination_blob_name\n )\n\n print(\n \"Blob {} in bucket {} copied to blob {} in bucket {}.\".format(\n source_blob.name,\n source_bucket.name,\n blob_copy.name,\n destination_bucket.name,\n )\n )\n\n def list_blobs_in_a_path(self, file_prefix, delimiter=None):\n \"\"\"Lists all the blobs in the bucket.\"\"\"\n # bucket_name = \"your-bucket-name\"\n print(\"*****File prefix is ***** \" + file_prefix)\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(\n self.bucket, prefix=file_prefix, delimiter=delimiter\n )\n return blobs\n\n def move_blob(self, blob_name, destination_blob_name, destination_bucket_name=None):\n\n if not destination_bucket_name:\n destination_bucket_name = self.bucket\n is_path_exists = self.check_path_exists(blob_name)\n if is_path_exists:\n source_blob = self.copy_blob_for_move(\n self.bucket, blob_name, destination_bucket_name, destination_blob_name\n )\n source_blob.delete()\n print(\"Blob {} deleted.\".format(source_blob))\n print(\"***Move Success***Blob {} copied.\".format(source_blob))\n else:\n print(\n \"***Move Failed***.Blob {} in bucket {} does not exist.\".format(\n blob_name,\n self.bucket)\n )\n\n def copy_blob_file(self, blob_name, destination_blob_name, destination_bucket_name=None):\n\n if not destination_bucket_name:\n destination_bucket_name = self.bucket\n is_path_exists = self.check_path_exists(blob_name)\n if is_path_exists:\n source_blob = self.copy_blob_for_move(\n self.bucket, blob_name, destination_bucket_name, destination_blob_name\n )\n print(\"***Copy Success***Blob {} copied.\".format(source_blob))\n else:\n print(\n \"***Copy Failed***.Blob {} in bucket {} does not exist.\".format(\n blob_name,\n self.bucket)\n )\n\n @staticmethod\n def copy_blob_for_move(\n bucket_name, blob_name, destination_bucket_name, destination_blob_name\n ):\n \"\"\"Copies a blob from one bucket to another with a new name.\"\"\"\n # bucket_name = \"your-bucket-name\"\n # blob_name = \"your-object-name\"\n # destination_bucket_name = \"destination-bucket-name\"\n # destination_blob_name = \"destination-object-name\"\n\n storage_client = storage.Client()\n\n source_bucket = storage_client.bucket(bucket_name)\n source_blob = source_bucket.blob(blob_name)\n destination_bucket = storage_client.bucket(destination_bucket_name)\n\n blob_copy = source_bucket.copy_blob(\n source_blob, destination_bucket, destination_blob_name\n )\n\n print(\n \"Blob {} in bucket {} copied to blob {} in bucket {}.\".format(\n source_blob.name,\n source_bucket.name,\n blob_copy.name,\n destination_bucket.name,\n )\n )\n return source_blob\n\n def delete_object(self, dir_path):\n\n bucket = self.client.bucket(self.bucket)\n # blob = bucket.blob(f'{dir_path}')\n\n all_file = self.list_blobs_in_a_path(dir_path)\n\n for file in all_file:\n blob = bucket.blob(file.name)\n blob.delete()\n print(\"Blob {} deleted.\".format(file.name))\n\n def download_blob(self, source_blob_name, destination_file_name):\n # \"\"\"Downloads a blob from the bucket.\"\"\"\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(self.bucket)\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Blob {} from 
Bucket {} downloaded to {}.\".format(\n source_blob_name, self.bucket, destination_file_name\n )\n )\n", "sub_path": "packages/ekstep_data_pipelines/common/gcs_operations.py", "file_name": "gcs_operations.py", "file_ext": "py", "file_size_in_byte": 15743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "google.cloud.storage.Client", "line_number": 32, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 32, "usage_type": "name"}, {"api_name": "google.cloud.storage.Blob", "line_number": 52, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 52, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 73, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 81, "usage_type": "call"}, {"api_name": "google.cloud.storage.Client", "line_number": 106, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 106, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 218, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 223, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 224, "usage_type": "call"}, {"api_name": "google.cloud.storage.Client", "line_number": 263, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 263, "usage_type": "name"}, {"api_name": "google.cloud.storage.Client", "line_number": 282, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 282, "usage_type": "name"}, {"api_name": "google.cloud.storage.Client", "line_number": 303, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 303, "usage_type": "name"}, {"api_name": "google.cloud.storage.Client", "line_number": 326, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 326, "usage_type": "name"}, {"api_name": "google.cloud.storage.Client", "line_number": 380, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 380, "usage_type": "name"}, {"api_name": "google.cloud.storage.Client", "line_number": 418, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 418, "usage_type": "name"}]} +{"seq_id": "95907258", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n Training script for oculomotor tasks.\n\"\"\"\n\nimport argparse\nimport cv2\nimport os\nimport time\nimport numpy as np\nfrom distutils.util import strtobool\n\nfrom agent import Agent\nfrom functions import BG, 
FEF, LIP, PFC, Retina, SC, VC, HP, CB\nfrom oculoenv import Environment\nfrom oculoenv import PointToTargetContent, ChangeDetectionContent, OddOneOutContent, VisualSearchContent, \\\n MultipleObjectTrackingContent, RandomDotMotionDiscriminationContent\nfrom logger import Logger\nfrom gen_dataset import generate, generate_opt_flow\n\n\n\nclass Contents(object):\n POINT_TO_TARGET = 1\n CHANGE_DETECTION = 2\n ODD_ONE_OUT = 3\n VISUAL_SEARCH = 4\n MULTIPLE_OBJECT_TRACKING = 5\n RANDOM_DOT_MOTION_DISCRIMINATION = 6\n\n\ndef get_content(content_type):\n if content_type == Contents.POINT_TO_TARGET:\n content = PointToTargetContent()\n elif content_type == Contents.CHANGE_DETECTION:\n content = ChangeDetectionContent()\n elif content_type == Contents.ODD_ONE_OUT:\n content = OddOneOutContent()\n elif content_type == Contents.VISUAL_SEARCH:\n content = VisualSearchContent()\n elif content_type == Contents.MULTIPLE_OBJECT_TRACKING:\n content = MultipleObjectTrackingContent()\n else:\n content = RandomDotMotionDiscriminationContent()\n return content\n\n\ndef collect(content, step_size):\n retina = Retina()\n lip = LIP()\n vc = VC()\n pfc = PFC()\n fef = FEF()\n bg = BG()\n sc = SC()\n hp = HP()\n cb = CB()\n \n agent = Agent(\n retina=retina,\n lip=lip,\n vc=vc,\n pfc=pfc,\n fef=fef,\n bg=bg,\n sc=sc,\n hp=hp,\n cb=cb\n )\n \n env = Environment(content)\n\n pfc.load_model('data/pfc_task_detection.pth')\n \n obs = env.reset()\n \n reward = 0\n done = False\n \n episode_reward = 0\n episode_count = 0\n\n actions = []\n angles = []\n phases = []\n rewards = []\n targets = []\n lures = []\n\n if not os.path.exists(\"base_data\"):\n os.mkdir(\"base_data\")\n\n start_time = time.time()\n\n file_size_in_dir = 1000\n\n for i in range(step_size):\n if i % file_size_in_dir == 0:\n dir_path = \"base_data/dir{}\".format(i // file_size_in_dir)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n \n image, angle = obs['screen'], obs['angle']\n # Choose action by the agent's decision\n action = agent(image, angle, reward, done)\n # Forward the environment one step\n obs, reward, done, info = env.step(action)\n\n phase = info['phase']\n\n if 'target' in info:\n target = info['target']\n else:\n target = (0.0, 0.0, 0.0)\n if 'lure' in info:\n lure = info['lure']\n else:\n lure = (0.0, 0.0, 0.0)\n\n actions.append(action)\n angles.append(angle)\n phases.append(phase)\n rewards.append(reward)\n targets.append(target)\n lures.append(lure)\n\n file_name = \"{}/image{}.png\".format(dir_path, i)\n image = cv2.cvtColor(obs[\"screen\"], cv2.COLOR_RGB2BGR)\n cv2.imwrite(file_name, image)\n \n episode_reward += reward\n\n if i % 1000 == 0:\n print(\"step{}\".format(i))\n elapsed_time = time.time() - start_time\n print(\"fps={}\".format(i / elapsed_time))\n\n if done:\n obs = env.reset()\n print(\"episode reward={}\".format(episode_reward))\n \n # Store log for tensorboard graph\n episode_count += 1\n \n episode_reward = 0\n\n np.savez_compressed(\"base_data/infos\",\n actions=actions,\n angles=angles,\n rewards=rewards,\n phases=phases,\n targets=targets,\n lures=lures)\n \n print(\"collecting finished\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--content\",\n help=\"1: Point To Target\"\n + \" 2: Change Detection\"\n + \" 3: Odd One Out\"\n + \" 4: Visual Search\"\n + \" 5: Multiple Object Tracking\"\n + \" 6: Random Dot Motion Discrimination\",\n type=int,\n default=1)\n \n # Small dataset version\n parser.add_argument(\"--step_size\", help=\"Training step size\", type=int, 
default=20*10000+1)\n parser.add_argument(\"--opt_flow_only\", type=strtobool, default=\"false\")\n \n args = parser.parse_args()\n \n content_type = args.content\n step_size = args.step_size\n\n # Create task content\n content = get_content(content_type)\n \n print(\"start collecting content: {} step_size={}\".format(content_type, step_size))\n\n if args.opt_flow_only:\n # Generate optical flow dataset only\n generate_opt_flow(\"base_data\", step_size-1)\n else:\n # Collect original images\n collect(content, step_size)\n\n # Generate dataset\n generate(\"base_data\", step_size-1)\n\n # Generate optical flow\n generate_opt_flow(\"base_data\", step_size-1)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "application/collect.py", "file_name": "collect.py", "file_ext": "py", "file_size_in_byte": 5298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "oculoenv.PointToTargetContent", "line_number": 34, "usage_type": "call"}, {"api_name": "oculoenv.ChangeDetectionContent", "line_number": 36, "usage_type": "call"}, {"api_name": "oculoenv.OddOneOutContent", "line_number": 38, "usage_type": "call"}, {"api_name": "oculoenv.VisualSearchContent", "line_number": 40, "usage_type": "call"}, {"api_name": "oculoenv.MultipleObjectTrackingContent", "line_number": 42, "usage_type": "call"}, {"api_name": "oculoenv.RandomDotMotionDiscriminationContent", "line_number": 44, "usage_type": "call"}, {"api_name": "functions.Retina", "line_number": 49, "usage_type": "call"}, {"api_name": "functions.LIP", "line_number": 50, "usage_type": "call"}, {"api_name": "functions.VC", "line_number": 51, "usage_type": "call"}, {"api_name": "functions.PFC", "line_number": 52, "usage_type": "call"}, {"api_name": "functions.FEF", "line_number": 53, "usage_type": "call"}, {"api_name": "functions.BG", "line_number": 54, "usage_type": "call"}, {"api_name": "functions.SC", "line_number": 55, "usage_type": "call"}, {"api_name": "functions.HP", "line_number": 56, "usage_type": "call"}, {"api_name": "functions.CB", "line_number": 57, "usage_type": "call"}, {"api_name": "agent.Agent", "line_number": 59, "usage_type": "call"}, {"api_name": "oculoenv.Environment", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 91, "usage_type": "call"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 128, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 129, "usage_type": "call"}, {"api_name": "time.time", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 147, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 159, "usage_type": "call"}, {"api_name": "distutils.util.strtobool", "line_number": 172, "usage_type": "name"}, {"api_name": "gen_dataset.generate_opt_flow", "line_number": 186, "usage_type": "call"}, {"api_name": "gen_dataset.generate", "line_number": 192, "usage_type": "call"}, {"api_name": "gen_dataset.generate_opt_flow", "line_number": 195, 
"usage_type": "call"}]} +{"seq_id": "632854940", "text": "# !/usr/bin/env python\n# -*- encoding:utf-8 -*-\n# Author: Kenzhaoyihui\n# Date: 2016.11.29\n# Email: kenzhaoyihui@gmail.com\n\"\"\"\nfrom splinter import Browser\n\nwith Browser() as browser:\n\t# Visit URL\n\turl = \"http://www.google.com\"\n\tbrowser.visit(url)\n\tbrowser.fill('q', 'splinter - python acceptance testing for web application')\n\t# Find and click the 'search' button\n\tbutton = browser.find_by_name('btnG')\n\tbutton.click()\n\tprint(browser.is_text_present)\n\tb\n\tif browser.is_text_present('splinter.readthedocs.io'):\n\t\tprint(\"Yes, the offical website was found!\")\n\telse:\n\t\tprint(\"No, it wasn't found... We need to improve our SEO techniques\")\n\"\"\"\n\nfrom splinter import Browser\nimport time\n\n\nusername = \"13030915\"\npassword = \"13030915\"\nurl = \"http://180.209.113.96\"\ninit_url = \"http://180.209.113.96/Florms/FormSYS.aspx\"\n\n\ndef login():\n\tb.fill(\"txtUserName\", username)\n\tb.fill(\"txtPassword\", password)\n\tb.find_by_id(\"cmdOK\").click()\n\n\ndef lj():\n\tglobal b\n\tb = Browser(driver_name='firefox')\n\tb.visit(url)\n\tb.execute_script('alert(\"Begin input!~~~\")')\n\ttime.sleep(1)\n\tb.get_alert().dismiss()\n\n\twhile b.is_element_present_by_id(\"cmdOK\"):\n\t\tlogin()\n\t\tif b.url == init_url:\n\t\t\tbreak\n\tb.find_by_text(u\"展开全部\").click()\n\ttime.sleep(1)\n\tb.find_by_xpath(\".//*[@id='ext-gen74']/li[1]/div/a/span\").click()\n\tb.driver.switch_to_frame(\"dynamic_added_tabxnode1\")\n\twhile b.is_element_not_present_by_xpath(\".//*[@id='ext-gen45']/div[2]/table/tbody/tr/td[7]/div/a/img\"):\n\t\ttime.sleep(2)\n\t\tcontinue\n\tb.find_by_xpath(\".//*[@id='ext-gen45']/div[3]/table/tbody/tr/td[7]/div/a/img\").click()\n\n\tb.driver.switch_to_default_content()\n\tb.driver.switch_to_frame(\"ext-gen107\")\n\twhile b.is_element_not_present_by_xpath(\".//*[@id='t101003015']\"):\n\t\ttime.sleep(2)\n\t\tcontinue\n\tb.find_by_xpath(\".//*[@id='t101003015']\").click()\n\n\tb.find_by_xpath(\".//*[@id='101003015']/div[4]\").click()\n\tb.driver.switch_to_frame(\"ext-gen18\")\n\twhile b.is_element_not_present_by_text(u\"重新选择\"):\n\t\ttime.sleep(2)\n\t\tcontinue\n\tb.find_by_text(u\"重新选择\").click()\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\tlj()", "sub_path": "splinter_LJ.py", "file_name": "splinter_LJ.py", "file_ext": "py", "file_size_in_byte": 2053, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "splinter.Browser", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "464223210", "text": "\"\"\"Main module of the Dota 2 subreddit Responses Bot.\n\nThe main body of the script is running in this file. The comments are loaded from the subreddit\nand the script checks if the comment or submission is a response from Dota 2. If it is, a proper reply for response is\nprepared. 
The response is posted as a reply to the original comment/submission on Reddit.\n\nProper logging is provided - saved to 2 files as standard output and errors.\n\"\"\"\n\nfrom praw.models import Comment\n\nimport config\nfrom bot import account\nfrom util.caching import get_cache_api\nfrom util.database.database import db_api\nfrom util.logger import logger\nfrom util.str_utils import preprocess_text\n\n__author__ = 'Jonarzz'\n__maintainer__ = 'MePsyDuck'\n\ncache_api = get_cache_api()\n\n\ndef work():\n \"\"\"Main method executing the script.\n\n It connects to an account, loads dictionaries from proper files (declared in config file).\n Afterwards it executes process_comments method with proper arguments passed.\n \"\"\"\n\n reddit = account.get_account()\n logger.info('Connected to Reddit account : ' + config.USERNAME)\n\n comment_stream = reddit.subreddit(config.SUBREDDIT).stream.comments(pause_after=-1)\n submission_stream = reddit.subreddit(config.SUBREDDIT).stream.submissions(pause_after=-1)\n while True:\n for comment in comment_stream:\n if comment is None:\n break\n process_replyable(reddit, comment)\n for submission in submission_stream:\n if submission is None:\n break\n process_replyable(reddit, submission)\n\n\ndef process_replyable(reddit, replyable):\n \"\"\"Method used to check all the comments in a submission and add replies if they are responses.\n\n PRAW generates past ~100 comments/submissions on the first iteration. Then the loop only runs if there is a new\n comment/submission added to the stream. This also means that once PRAW is up and running, after the initial comments\n list it won't generate any duplicate comments.\n\n However, just as a safeguard, Caching is used to store replyable ids as they are processed for the first time.\n Otherwise, when the bot is restarted it might reply twice to same comments. 
If the replyable id is already present\n in the cache_api, it is ignored; otherwise it is processed and added to the cache_api.\n * Self comments are ignored.\n * The body text is preprocessed for comparison with the responses dictionary.\n * If the replyable is not on the excluded responses list (loaded from config) and if it is in the responses db or\n specific responses list, a reply is prepared and posted.\n\n :param reddit: The reddit account instance\n :param replyable: comment or submission\n :return: None\n \"\"\"\n\n if cache_api.check(thing_id=replyable.fullname):\n return\n\n # Ignore thyself (user.me() is a method returning the bot's own Redditor instance)\n if replyable.author == reddit.user.me():\n return\n\n logger.debug(\"Found new replyable: \" + str(replyable.fullname))\n\n processed_body = process_body(replyable.body if isinstance(replyable, Comment) else replyable.title)\n\n # Don't reply to single word text (they're mostly common phrases).\n if ' ' not in processed_body:\n return\n\n if processed_body in config.EXCLUDED_RESPONSES:\n return\n\n if processed_body in config.CUSTOM_RESPONSES:\n add_custom_reply(replyable=replyable, custom_response=config.CUSTOM_RESPONSES[processed_body])\n\n if not flair_specific_reply_added(replyable, processed_body):\n add_regular_reply(replyable, processed_body)\n\n\ndef process_body(body_text):\n \"\"\"Method used to clean the replyable body text.\n If body text contains a quote, the first quote text is considered as the body text.\n\n Removed code to remove repeating letters in a body text because it does more harm than good - words like 'all',\n 'tree' are stripped to 'al' and 'tre' which don't match with any responses.\n\n :param body_text: The replyable body text\n :return: Processed body text\n \"\"\"\n\n if '>' in body_text:\n lines = body_text.split('\\n\\n')\n for line in lines:\n if line.startswith('>'):\n body_text = line\n break\n\n return preprocess_text(body_text)\n\n\ndef flair_specific_reply_added(replyable, processed_text):\n \"\"\"Method that tries to add an author's flair-specific reply to the comment/submission.\n\n :param replyable: The comment/submission on reddit\n :param processed_text: The processed body text\n :return: True if the replyable was replied to, else False.\n \"\"\"\n hero_id = db_api.get_hero_id_by_flair_css(flair_css=replyable.author_flair_css_class)\n if hero_id:\n link, hero_id = db_api.get_link_for_response(processed_text=processed_text, hero_id=hero_id)\n if link:\n reply = create_reply(replyable=replyable, response_url=link, hero_id=hero_id)\n replyable.reply(reply)\n logger.info(\"Added: \" + replyable.fullname)\n return True\n return False\n\n\ndef add_regular_reply(replyable, processed_text):\n \"\"\"Method to create a response for the given replyable.\n In case of multiple matches, it used to sort responses in descending order of heroes, but now it's random.\n\n :param replyable: The comment/submission on reddit\n :param processed_text: The processed body text\n :return: None\n \"\"\"\n\n link, hero_id = db_api.get_link_for_response(processed_text=processed_text)\n\n if link and hero_id:\n img_dir = db_api.get_img_dir_by_id(hero_id=hero_id)\n\n replyable.reply(create_reply(replyable=replyable, response_url=link, hero_id=hero_id, img=img_dir))\n\n logger.info(\"Replied to: \" + replyable.id)\n\n\ndef add_custom_reply(replyable, custom_response):\n \"\"\"Method to create a custom reply for specific cases that match the custom responses set.\n\n :param replyable: The comment/submission on reddit\n :param custom_response: The matching custom response\n :return: None\n 
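\n Illustrative example (the template below is hypothetical, not a real config.CUSTOM_RESPONSES entry):\n with custom_response = \"Did you mean {}? {}\" and original text \"free hugs\", the reply is\n \"Did you mean free hugs? \" followed by config.COMMENT_ENDING.\n 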
\"\"\"\n original_text = replyable.body if isinstance(replyable, Comment) else replyable.title\n\n reply = custom_response.format(original_text, config.COMMENT_ENDING)\n replyable.reply(reply)\n\n\ndef create_reply(replyable, response_url, hero_id, img=None):\n \"\"\"Method that creates a reply in reddit format.\n The reply consists of a link to the response audio file, the response itself, a warning about the sound\n and an ending added from the config file (post footer).\n \n TODO Image is currently ignored due to new reddit redesign not rendering flairs properly.\n\n :param replyable: The comment/submission on reddit\n :param response_url: The url to the response audio file\n :param hero_id: The hero_id to which the response belongs to.\n :param img: The img path to be used for reply.\n :return: The text for the comment reply.\n \"\"\"\n original_text = replyable.body if isinstance(replyable, Comment) else replyable.title\n\n hero_name = db_api.get_hero_name(hero_id)\n return \"[{}]({}) (sound warning: {}){}\".format(original_text, response_url, hero_name, config.COMMENT_ENDING)\n", "sub_path": "bot/worker.py", "file_name": "worker.py", "file_ext": "py", "file_size_in_byte": 7070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "util.caching.get_cache_api", "line_number": 22, "usage_type": "call"}, {"api_name": "bot.account.get_account", "line_number": 32, "usage_type": "call"}, {"api_name": "bot.account", "line_number": 32, "usage_type": "name"}, {"api_name": "util.logger.logger.info", "line_number": 33, "usage_type": "call"}, {"api_name": "util.logger.logger", "line_number": 33, "usage_type": "name"}, {"api_name": "config.USERNAME", "line_number": 33, "usage_type": "attribute"}, {"api_name": "config.SUBREDDIT", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.SUBREDDIT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "util.logger.logger.debug", "line_number": 75, "usage_type": "call"}, {"api_name": "util.logger.logger", "line_number": 75, "usage_type": "name"}, {"api_name": "praw.models.Comment", "line_number": 77, "usage_type": "argument"}, {"api_name": "config.EXCLUDED_RESPONSES", "line_number": 83, "usage_type": "attribute"}, {"api_name": "config.CUSTOM_RESPONSES", "line_number": 86, "usage_type": "attribute"}, {"api_name": "config.CUSTOM_RESPONSES", "line_number": 87, "usage_type": "attribute"}, {"api_name": "util.str_utils.preprocess_text", "line_number": 111, "usage_type": "call"}, {"api_name": "util.database.database.db_api.get_hero_id_by_flair_css", "line_number": 121, "usage_type": "call"}, {"api_name": "util.database.database.db_api", "line_number": 121, "usage_type": "name"}, {"api_name": "util.database.database.db_api.get_link_for_response", "line_number": 123, "usage_type": "call"}, {"api_name": "util.database.database.db_api", "line_number": 123, "usage_type": "name"}, {"api_name": "util.logger.logger.info", "line_number": 127, "usage_type": "call"}, {"api_name": "util.logger.logger", "line_number": 127, "usage_type": "name"}, {"api_name": "util.database.database.db_api.get_link_for_response", "line_number": 141, "usage_type": "call"}, {"api_name": "util.database.database.db_api", "line_number": 141, "usage_type": "name"}, {"api_name": "util.database.database.db_api.get_img_dir_by_id", "line_number": 144, "usage_type": "call"}, {"api_name": "util.database.database.db_api", "line_number": 144, "usage_type": "name"}, {"api_name": "util.logger.logger.info", 
"line_number": 148, "usage_type": "call"}, {"api_name": "util.logger.logger", "line_number": 148, "usage_type": "name"}, {"api_name": "praw.models.Comment", "line_number": 158, "usage_type": "argument"}, {"api_name": "config.COMMENT_ENDING", "line_number": 160, "usage_type": "attribute"}, {"api_name": "praw.models.Comment", "line_number": 177, "usage_type": "argument"}, {"api_name": "util.database.database.db_api.get_hero_name", "line_number": 179, "usage_type": "call"}, {"api_name": "util.database.database.db_api", "line_number": 179, "usage_type": "name"}, {"api_name": "config.COMMENT_ENDING", "line_number": 180, "usage_type": "attribute"}]} +{"seq_id": "204169234", "text": "import numpy as np\r\nfrom keras.layers import *\r\nimport keras\r\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping\r\nimport postprocessing\r\nimport excel\r\nimport estimate\r\nimport os\r\nimport cv2\r\nimport json\r\n\r\n\r\nclass AdaBoostCNN(object):\r\n def __init__(self, *args, **kwargs):\r\n if kwargs and args:\r\n raise ValueError(\r\n '''AdaBoostClassifier can only be called with keyword\r\n arguments for the following keywords: base_estimator ,n_estimators,\r\n learning_rate,algorithm,random_state''')\r\n allowed_keys = ['base_estimator', 'n_estimators', 'learning_rate', 'first_epochs', 'else_epochs', 'input_size', 'output_size', 'name', 'batch_size']\r\n keywords_used = kwargs.keys()\r\n for keyword in keywords_used:\r\n if keyword not in allowed_keys:\r\n raise ValueError(keyword + \": Wrong keyword used --- check spelling\")\r\n\r\n n_estimators = 10\r\n learning_rate = 2\r\n #### CNN (5)\r\n epochs = 6\r\n input_size = (256, 256, 4)\r\n output_size = (256, 256, 1)\r\n\r\n if kwargs and not args:\r\n if 'base_estimator' in kwargs:\r\n base_estimator = kwargs.pop('base_estimator')\r\n else:\r\n raise ValueError('''base_estimator can not be None''')\r\n if 'n_estimators' in kwargs: n_estimators = kwargs.pop('n_estimators')\r\n if 'learning_rate' in kwargs: learning_rate = kwargs.pop('learning_rate')\r\n ### CNN:\r\n if 'first_epochs' in kwargs: first_epochs = kwargs.pop('first_epochs')\r\n if 'else_epochs' in kwargs: else_epochs = kwargs.pop('else_epochs')\r\n if 'input_size' in kwargs: input_size = kwargs.pop('input_size')\r\n if 'output_size' in kwargs: output_size = kwargs.pop('output_size')\r\n\r\n if 'batch_size' in kwargs: batch_size = kwargs.pop('batch_size')\r\n if 'name' in kwargs: name = kwargs.pop('name')\r\n\r\n self.name = name\r\n self.base_estimator = base_estimator\r\n self.n_estimators = n_estimators\r\n self.learning_rate_ = learning_rate\r\n self.estimators_ = list()\r\n self.estimator_weights_ = np.zeros(self.n_estimators)\r\n self.estimator_errors_ = np.ones(self.n_estimators)\r\n\r\n self.first_epochs = first_epochs\r\n self.else_epochs = else_epochs\r\n self.input_size = input_size\r\n self.output_size = output_size\r\n self.batch_size = batch_size\r\n\r\n def fit(self, x, y):\r\n self.n_samples = x.shape[0]\r\n self. classes_ = np.array(sorted(list(set(y.flatten()))))\r\n\r\n# 訓練迴圈\r\n for estimator_index in range(self.n_estimators):\r\n print(\"No. 
{} training process.\".format(estimator_index + 1))\r\n if estimator_index == 0:\r\n sample_weight = np.ones(self.n_samples) / self.n_samples\r\n\r\n sample_weight, estimator_weight, estimator_error= self.boost(x, y, sample_weight, order = estimator_index)\r\n\r\n\r\n self.estimator_errors_[estimator_index] = estimator_error\r\n self.estimator_weights_[estimator_index] = estimator_weight\r\n\r\n # dict_estimators_weights = {}\r\n # dict_estimators_weights['estimator_weights'] = self.estimator_weights_\r\n # with open(\".\\\\result\\\\\" + self.name + 'estimator_weights.json', 'w', encoding='utf-8') as f:\r\n # json.dump(dict_estimators_weights, f)\r\n\r\n print(\"sample weight={}\".format(sample_weight))\r\n print(\"estimatore weight={}\".format(self.estimator_weights_))\r\n print(\"estimator error={}\".format(self.estimator_errors_))\r\n return self\r\n\r\n def boost(self, x, y, sample_weight, order, threshold = 0.5):\r\n\r\n if order == 0:\r\n estimator = self.base_estimator\r\n else:\r\n estimator = self.estimator\r\n\r\n # CNN 訓練\r\n weight_path = \".\\\\result\\model_record\\\\\" + self.name + '_' + str(order)\r\n saveModel = ModelCheckpoint(weight_path + '-weights-epoch{epoch:03d}-loss{loss:.5f}-val_loss{val_loss:.5f}.h5',\r\n verbose=1, save_best_only=False, save_weights_only=True)\r\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto') # 7/9 新增\r\n callBack = [saveModel, reduce_lr]\r\n\r\n if order == 0:\r\n estimator.fit(x, y, sample_weight= sample_weight, epochs= self.first_epochs, batch_size= self.batch_size,\r\n validation_split=0.125, callbacks=callBack)\r\n else:\r\n estimator.fit(x, y, sample_weight=sample_weight, epochs=self.else_epochs, batch_size=self.batch_size,\r\n validation_split=0.125, callbacks=callBack)\r\n estimator.save(\".\\\\result\\model_record\\\\\" + self.name + '_' + str(order) + '.h5')\r\n\r\n# CNN 訓練資料預測\r\n y_pred = estimator.predict(x)\r\n# CNN 檢查預測結果比較\r\n y_threshold = y_pred >= threshold\r\n y_threshold = np.ones(y_threshold.shape, dtype=np.uint8) * y_threshold\r\n # if np.sum(y_threshold, axis=(0, 1, 2, 3)) == 0:\r\n # return None, 0, None\r\n y_incorrect = y_threshold != y\r\n incorrect = np.sum(y_incorrect, axis=(1, 2, 3)) / self.input_size[0] ** 2\r\n #\r\n estimator_error = np.dot(incorrect, sample_weight) / np.sum(sample_weight, axis=0)\r\n\r\n # y_predict_proba = estimator.predict_proba(x)\r\n y_predict_proba = y_pred\r\n\r\n # repalce zero\r\n y_predict_proba[y_predict_proba < np.finfo(y_predict_proba.dtype).eps] = np.finfo(y_predict_proba.dtype).eps\r\n\r\n # -------------------------以上完成-----------------------------\r\n self.n_classes_ = 2\r\n self.classes_ = list([0, 1])\r\n\r\n # 第一次正常運作 第一版\r\n # y_mse = np.mean((y_predict_proba - y) ** 2, axis=(1, 2, 3)) / self.output_size[0]**2\r\n # y_mse = np.mean((y_predict_proba - y) ** 2, axis=(1, 2, 3)) # 7/7\r\n # print(\"y_mse.shape = {}.\".format(y_mse.shape))\r\n # print(\"y_mse.max() = {}.\".format(y_mse.max()))\r\n # print(\"y_mse.min() = {}.\".format(y_mse.min()))\r\n # y_mse = 1 - y_mse # 7/3\r\n # y_refresh = y_mse\r\n\r\n # 第二版 使用原本的概念\r\n y_mse = np.mean((y_predict_proba - y) ** 2, axis=(1, 2, 3))\r\n y_far = (y_mse > threshold) * np.ones(y_mse.shape)\r\n y_closed = (y_mse <= threshold) * np.ones(y_mse.shape)\r\n y_refresh = y_far * (-1. / (self.n_classes_ - 1)) * y_mse + y_closed * y_mse\r\n\r\n # for sample weight update\r\n intermediate_variable = (-1. 
* self.learning_rate_ * (((self.n_classes_ - 1) / self.n_classes_) * y_refresh))\r\n # intermediate_variable = (-1. * self.learning_rate_ * y_refresh)\r\n\r\n # dot iterate for each row\r\n # ---------------------以上不懂------------------------------------------\r\n # update sample weight\r\n sample_weight *= np.exp(intermediate_variable)\r\n print(\"sample weight.max = {}\".format(sample_weight.max()))\r\n # sample_weight = 1 - sample_weight 7/2\r\n sample_weight_sum = np.sum(sample_weight, axis=0)\r\n if sample_weight_sum <= 0:\r\n print(\"sample weight sum <= 0\")\r\n return None, None, None\r\n # normalize sample weight\r\n sample_weight /= sample_weight_sum\r\n\r\n self.estimator = estimator\r\n return sample_weight, 1, estimator_error\r\n\r\n def test(self, x_test, y_test, data_start, batch_size=10, threshold=0.5, save_path = None):\r\n def mkdir(path):\r\n # 去除首位空格\r\n path = path.strip()\r\n # 去除尾部 \\ 符號\r\n path = path.rstrip(\"\\\\\")\r\n\r\n # 判斷路徑是否存在\r\n # 存在 True\r\n # 不存在 False\r\n isExists = os.path.exists(path)\r\n\r\n # 判斷結果\r\n if not isExists:\r\n # 如果不存在則建立目錄\r\n print(\"Building the file.\")\r\n # 建立目錄操作函式\r\n os.makedirs(path)\r\n return True\r\n else:\r\n # 如果目錄存在則不建立,並提示目錄已存在\r\n print(\"File is existing.\")\r\n return False\r\n\r\n # if x_test.ndim == 3:\r\n # x_test = np.expand_dims(x_test, axis=-1)\r\n # if y_test.ndim == 3:\r\n # y_test = np.expand_dims(y_test, axis=-1)\r\n\r\n if save_path == None:\r\n save_path = self.name\r\n\r\n # with open(\".\\\\result\\\\\" + self.name + 'estimator_weights.json', 'r', encoding='utf-8') as f:\r\n # output = json.load(f)\r\n # estimator_weights = output['estimator_weights']\r\n\r\n\r\n\r\n file_name = []\r\n for order in range(self.n_estimators):\r\n file_name.append(self.name + \"_\" + str(order) + \".h5\")\r\n print(file_name[-1])\r\n\r\n y_predict = np.zeros(y_test.shape, dtype=np.float32)\r\n c = 0\r\n for model_file in file_name:\r\n # if estimator_weights[c] == 0:\r\n # c = c + 1\r\n # continue\r\n # c = c + 1\r\n print(\"Read model weight {}\".format(\".\\\\result\\\\model_record\\\\\" + model_file))\r\n self.base_estimator.load_weights(\".\\\\result\\\\model_record\\\\\" + model_file)\r\n result = self.base_estimator.predict(x_test, batch_size=batch_size) / len(file_name)\r\n # result = self.estimator.predict(x_test, batch_size=batch_size) / np.sum(estimator_weights)\r\n print(\"Result = {}\".format(np.sum(result)))\r\n y_predict = y_predict + result\r\n\r\n\r\n print(\"Check the threshold.\")\r\n y_output = postprocessing.check_threshold(y_predict,\r\n size=self.output_size,\r\n threshold=threshold)\r\n\r\n print(\"Estimate.\")\r\n iou = estimate.IOU(y_test, y_output, self.output_size[0], len(y_test))\r\n (precision, recall, F1) = estimate.F1_estimate(y_test, y_output, self.output_size[0], len(y_test))\r\n avr_iou = np.sum(iou) / len(y_test)\r\n avr_precision = np.sum(precision) / len(y_test)\r\n avr_recall = np.sum(recall) / len(y_test)\r\n avr_F1 = np.sum(F1) / len(y_test)\r\n print(\"Average IOU:{}\".format(avr_iou))\r\n\r\n print('Save the result.')\r\n mkdir(\".\\\\result\\image\\\\\" + self.name)\r\n for index in range(len(y_test)):\r\n img_save = y_output[index] * 255\r\n cv2.imwrite(\".\\\\result\\image\\\\\" + self.name + '\\\\{}.png'.format(data_start + index), img_save)\r\n print('Save image:{}'.format(data_start + index))\r\n\r\n ex_iou = excel.Excel()\r\n ex_iou.write_loss_and_iou(save_path, 0, 0, iou, avr_iou)\r\n ex_iou.write_excel(\"e1\", \"precision\", vertical=True)\r\n ex_iou.write_excel(\"e2\", 
precision, vertical=True)\r\n ex_iou.write_excel(\"f1\", \"avr_precision\", vertical=True)\r\n ex_iou.write_excel(\"f2\", avr_precision, vertical=True)\r\n ex_iou.write_excel(\"g1\", \"recall\", vertical=True)\r\n ex_iou.write_excel(\"g2\", recall, vertical=True)\r\n ex_iou.write_excel(\"h1\", \"avr_recall\", vertical=True)\r\n ex_iou.write_excel(\"h2\", avr_recall, vertical=True)\r\n ex_iou.write_excel(\"i1\", \"F1\", vertical=True)\r\n ex_iou.write_excel(\"i2\", F1, vertical=True)\r\n ex_iou.write_excel(\"j1\", \"avr_F1\", vertical=True)\r\n ex_iou.write_excel(\"j2\", avr_F1, vertical=True)\r\n\r\n ex_iou.save_excel(file_name=\".\\\\result\\data\\\\\" + save_path + \"_iou.xlsx\")\r\n ex_iou.close_excel()", "sub_path": "AdaBoostCNN.py", "file_name": "AdaBoostCNN.py", "file_ext": "py", "file_size_in_byte": 11726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 222, "usage_type": "call"}, {"api_name": "postprocessing.check_threshold", "line_number": 227, "usage_type": "call"}, {"api_name": "estimate.IOU", "line_number": 232, "usage_type": "call"}, {"api_name": "estimate.F1_estimate", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 237, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 244, "usage_type": "call"}, {"api_name": "excel.Excel", "line_number": 247, "usage_type": "call"}]} +{"seq_id": "202964966", "text": "import torch\nimport torch.nn as nn\n\nclass Transformer:\n def __init__(self):\n self.src = torch.rand((10, 32, 512))\n self.tgt = torch.rand((20, 32, 512))\n self.encoder_init = True\n self.decoder_init = True\n\n\n def transformer(self):\n 
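# Full pass: build a fresh nn.Transformer and run the random src/tgt batch through it.\r\n 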
transformer_model = nn.Transformer(d_model=512, nhead=8, dim_feedforward=1, dropout=0, num_encoder_layers=6, num_decoder_layers=6)\r\n out = transformer_model(self.src, self.tgt)\r\n return out\r\n\r\n\r\n def encoder(self):\r\n encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1, dropout=0)\r\n transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)\r\n memory = transformer_encoder(self.src)\r\n return memory\r\n\r\n def decoder(self, memory):\r\n decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, dim_feedforward=1, dropout=0)\r\n transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\r\n out = transformer_decoder(self.tgt, memory)\r\n return out\r\n\r\n\r\n def encoder_layer(self, src):\r\n encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1, dropout=0)\r\n \r\n if self.encoder_init: memory = encoder_layer(self.src)\r\n else: memory = encoder_layer(src)\r\n \r\n if self.encoder_init: \r\n self.encoder_init = None\r\n \r\n return memory\r\n\r\n def decoder_layer(self, memory, tgt):\r\n decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, dim_feedforward=1, dropout=0)\r\n \r\n if self.decoder_init: out = decoder_layer(self.tgt, memory)\r\n else: out = decoder_layer(tgt, memory)\r\n \r\n if self.decoder_init:  # was encoder_init: clear the decoder flag here, not the encoder's\r\n self.decoder_init = None\r\n \r\n return out\r\n\r\n\r\n def attention(self):\r\n multihead_attn = nn.MultiheadAttention(embed_dim=512, num_heads=8)  # MultiheadAttention takes num_heads, not nhead\r\n attn_output, attn_output_weights = multihead_attn(query=self.src, key=self.src, value=self.src)\r\n return attn_output, attn_output_weights\r\n\r\n\r\n\r\ndef main():\r\n transformer = Transformer()\r\n f = transformer.transformer()\r\n print(f[:,0,0])\r\n \r\n e = transformer.encoder()\r\n d = transformer.decoder(memory=e)\r\n print(d[:,0,0])\r\n \r\n e = transformer.encoder_layer(src=e)\r\n e = transformer.encoder_layer(src=e)\r\n e = transformer.encoder_layer(src=e)\r\n e = transformer.encoder_layer(src=e)\r\n e = transformer.encoder_layer(src=e)\r\n e = transformer.encoder_layer(src=e)\r\n d = transformer.decoder_layer(tgt=d, memory=e)\r\n d = transformer.decoder_layer(tgt=d, memory=e)\r\n d = transformer.decoder_layer(tgt=d, memory=e)\r\n d = transformer.decoder_layer(tgt=d, memory=e)\r\n d = transformer.decoder_layer(tgt=d, memory=e)\r\n d = transformer.decoder_layer(tgt=d, memory=e)\r\n print(d[:,0,0])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "sub_path": "Recurrent_neural_networks/pytorch/model/transformer/transformer.py", "file_name": "transformer.py", "file_ext": "py", "file_size_in_byte": 2828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.rand", "line_number": 6, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.nn.Transformer", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.TransformerDecoderLayer", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.TransformerDecoder", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 32, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.TransformerDecoderLayer", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.MultiheadAttention", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "132075049", "text": "from django.urls import path\n\nfrom meals.views import MealsListView, MealsCreateView, MealEditView, MealDeleteView, browse_meals_view, \\\n surprise_meal_view\n\nurlpatterns = [\n path('list/', MealsListView.as_view(), name='meals list'),\n path('create/', MealsCreateView.as_view(), name='meals create'),\n path('details/', MealEditView.as_view(), name='meals details'),\n path('delete/', MealDeleteView.as_view(), name='meals delete'),\n path('browse/', browse_meals_view, name='browse meals'),\n path('surprise/', surprise_meal_view, name='surprise meal'),\n\n]\n", "sub_path": "meals/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 590, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "meals.views.MealsListView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "meals.views.MealsListView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "meals.views.MealsCreateView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "meals.views.MealsCreateView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "meals.views.MealEditView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "meals.views.MealEditView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "meals.views.MealDeleteView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "meals.views.MealDeleteView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "meals.views.browse_meals_view", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "meals.views.surprise_meal_view", "line_number": 12, "usage_type": "argument"}]} +{"seq_id": "478422576", "text": "from django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext as _\n\n\ndef card_name(value):\n left, right = '[]'\n count = 0\n\n while value:\n index = value.find(left)\n if index == -1:\n break\n value = value[index:]\n\n index = value.find(right)\n if index == -1:\n raise ValidationError(_('Contains unmatched opening bracket'))\n value = value[index:]\n\n count += 1\n\n if not count:\n raise ValidationError(_('Does not contain brackets'))\n", "sub_path": "fpy/validators.py", "file_name": "validators.py", "file_ext": "py", "file_size_in_byte": 550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.core.exceptions.ValidationError", "line_number": 17, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 17, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 23, "usage_type": 
"call"}, {"api_name": "django.utils.translation.gettext", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "397889810", "text": "import numpy as np\nimport pandas as pd\nimport sys\nfrom sklearn.model_selection import train_test_split\n\nlambda_input = int(sys.argv[1])\n# sigma2_input = float(sys.argv[2])\n# X_train = np.genfromtxt(sys.argv[3], delimiter = \",\")\n# y_train = np.genfromtxt(sys.argv[4])\n# X_test = np.genfromtxt(sys.argv[5], delimiter = \",\")\ndata = pd.read_csv(sys.argv[2], delimiter=\";\")\n\nX = data.drop('quality', axis=1)\ny = data.quality\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\n\n## Solution for Part 1\ndef ridge_reg():\n\t## Input : Arguments to the function\n\t## Return : wRR, Final list of values to write in the file\n\tI = np.eye(X_train.shape[1])\n\treturn np.dot(np.dot(np.linalg.inv(lambda_input*I + np.dot(X_train.T,X_train)),X_train.T),y_train)\n\nwRR = ridge_reg() # Assuming wRR is returned from the function\nnp.savetxt(\"wRR_\" + str(lambda_input) + \".csv\", wRR, delimiter=\"\\n\") # write output to file\n\ndef prediction(input):\n\tv = np.genfromtxt('wRR_25.csv')\n\tpreds = np.dot(X_test, v)\n\n\treturn preds\n\ndef mse(y_true, y_pred):\n\tmean_error = np.mean(np.square(y_pred - y_true))\n\n\treturn mean_error\n\npreds = prediction(X_test)\ne = mse(y_test, preds)\nprint(e)\n\nfor l in lambda_input:\n\twRR = ridge_reg()\n# ## Solution for Part 2\n# def part2():\n# ## Input : Arguments to the function\n# ## Return : active, Final list of values to write in the file\n# pass\n\n# active = part2() # Assuming active is returned from the function\n# np.savetxt(\"active_\" + str(lambda_input) + \"_\" + str(int(sigma2_input)) + \".csv\", active, delimiter=\",\") # write output to file", "sub_path": "machine-learning/hw1_regression.py", "file_name": "hw1_regression.py", "file_ext": "py", "file_size_in_byte": 1597, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "249774948", "text": "from django.shortcuts import render,redirect\n# from django.http import HttpResponse\nfrom .models import *\nfrom .form import OrderForm,CustomerForm,CreateUserForm\nfrom itertools import islice\nfrom django.forms import inlineformset_factory\nfrom .filters import OrderFilter\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom .decorators import 
unauthenticated_user,allowed_users,admin_only\nfrom django.contrib.auth.models import Group\n# Create your views here.\n\n@unauthenticated_user\ndef registerPage(request):\n # form = UserCreationForm()\n form = CreateUserForm()\n if(request.method == \"POST\"):\n form = CreateUserForm(request.POST)\n if(form.is_valid()):\n user = form.save()\n username = form.cleaned_data.get('username')\n\n group = Group.objects.get(name='customers')\n user.groups.add(group)\n\n messages.success(request,'Account was created for ' + username)\n return redirect('/login')\n context={'form':form}\n return render(request, 'accounts/register.html',context)\n\n@unauthenticated_user\ndef loginPage(request):\n context={}\n if(request.method == \"POST\"):\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('home')\n else:\n messages.info(request, \"Username OR Password is incorrect\")\n \n return render(request, 'accounts/login.html',context)\n\n@login_required(login_url='login')\ndef logoutUser(request):\n logout(request)\n return render(request,'accounts/login.html')\n\n# @login_required(login_url='login')\n# @allowed_users(allowed_roles=['admin'])\n# @admin_only\ndef home(request):\n orders = Order.objects.all()\n customers = Customer.objects.all()\n total_customers = customers.count()\n total_orders = orders.count()\n Delivered = orders.filter(status=\"Delivered\").count()\n Pending = orders.filter(status=\"Pending\").count()\n # Last 5 order\n lastorders = list(islice(reversed(orders), 0, total_orders))\n # lastorders.reverse()\n context = {'customers' :customers,'orders' :lastorders[:5],'Pending':Pending,'Delivered':Delivered,'total_orders':total_orders} \n return render(request,'accounts/dashboard.html',context=context)\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef products(request):\n context = {'products':Product.objects.all()}\n return render(request,'accounts/products.html',context=context)\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef customer(request,pk):\n customer = Customer.objects.get(id=pk)\n orders = customer.order_set.all()\n total_orders = orders.count()\n myFilter = OrderFilter(request.GET, queryset=orders)\n orders = myFilter.qs\n context = {'customer':customer, 'orders':orders,'total_orders':total_orders, 'myFilter':myFilter}\n return render(request,'accounts/customer.html',context=context)\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef createOrder(request,pk):\n OrderFromSet = inlineformset_factory(Customer, Order, fields=('product', 'status'))\n customer = Customer.objects.get(id=pk)\n # form = OrderForm(initial={'customer':customer})\n formset = OrderFromSet(queryset=Order.objects.none(), instance=customer)\n if(request.method == 'POST'):\n # form = OrderForm(request.POST)\n formset = OrderFromSet(request.POST,instance=customer)\n if formset.is_valid():\n formset.save()\n return redirect('/') \n # context = {'form':form}\n context = {'formset':formset}\n return render(request, 'accounts/order_form.html',context=context)\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef updateOrder(request,pk):\n order = Order.objects.get(id=pk)\n form = OrderForm(instance=order) #old data\n if(request.method=='POST'):\n form = OrderForm(request.POST,instance=order) # new data\n if(form.is_valid()):\n 
form.save()\n return redirect('/')\n context = {'form':form}\n return render(request, 'accounts/order_form.html',context=context)\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef deleteOrder(request,pk):\n orderToDelete = Order.objects.get(id=pk)\n if(request.method == 'POST'):\n orderToDelete.delete()\n return redirect('/')\n context = {'item':orderToDelete}\n return render(request, 'accounts/delete.html',context=context)\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef createCustomer(request): \n form = CustomerForm()\n if(request.method == 'POST'):\n form = CustomerForm(request.POST)\n if(form.is_valid()):\n form.save()\n return redirect('/')\n context = {'form':form}\n return render(request, 'accounts/customer_form.html',context=context)\n\ndef userPage(request):\n context={}\n return render(request, 'accounts/user.html',context)", "sub_path": "accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "form.CreateUserForm", "line_number": 19, "usage_type": "call"}, {"api_name": "form.CreateUserForm", "line_number": 21, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 22, "usage_type": "call"}, {"api_name": "form.save", "line_number": 23, "usage_type": "call"}, {"api_name": "form.cleaned_data.get", "line_number": 24, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "decorators.unauthenticated_user", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "decorators.unauthenticated_user", "line_number": 34, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 50, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 66, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 70, "usage_type": "call"}, {"api_name": 
"decorators.allowed_users", "line_number": 71, "usage_type": "call"}, {"api_name": "filters.OrderFilter", "line_number": 82, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 76, "usage_type": "call"}, {"api_name": "decorators.allowed_users", "line_number": 77, "usage_type": "call"}, {"api_name": "django.forms.inlineformset_factory", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 101, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 86, "usage_type": "call"}, {"api_name": "decorators.allowed_users", "line_number": 87, "usage_type": "call"}, {"api_name": "form.OrderForm", "line_number": 106, "usage_type": "call"}, {"api_name": "form.OrderForm", "line_number": 108, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 109, "usage_type": "call"}, {"api_name": "form.save", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 102, "usage_type": "call"}, {"api_name": "decorators.allowed_users", "line_number": 103, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 120, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 122, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 114, "usage_type": "call"}, {"api_name": "decorators.allowed_users", "line_number": 115, "usage_type": "call"}, {"api_name": "form.CustomerForm", "line_number": 126, "usage_type": "call"}, {"api_name": "form.CustomerForm", "line_number": 128, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 129, "usage_type": "call"}, {"api_name": "form.save", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 123, "usage_type": "call"}, {"api_name": "decorators.allowed_users", "line_number": 124, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "218660898", "text": "#This file is based on code from https://plot.ly/~colleenV/6/twitter-vs-facebook/#code\n#This file creates a side by side bar graph to show the average likes for Facebook and Instagram posts posted on each day\n\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nimport sqlite3\nimport datetime\nimport calendar\n\nconn=sqlite3.connect('206_Final.sqlite')\ncur=conn.cursor()\n\nrows=cur.execute('Select likes, time_posted from Facebook') \n\ndays={}\n\nfor row in rows:\n\tlikes=row[0]\n\n\tdate_time=row[1].split('T') #Splits timestamp into date and time\n\tdate=datetime.datetime.strptime(date_time[0], '%Y-%M-%d') #Creates datetime obkect by stripping date\n\tday=datetime.datetime.weekday(date) ##Gets numerical value for day of the week Monday is 0, Sunday is 6\n\t\n\tif day not in days: #Creates dictionaries of like counts for each day into dictionary 
days\n\t\tdays[day]=[likes]\n\telse:\n\t\tdays[day].append(likes)\n\nx=[]\ny=[]\n\nfor day in sorted(days.keys()): #Orders days of the week Monday-Sunday\n\t(key, value)=(calendar.day_name[day], sum(days[day])/len(days[day])) #Converts numerical day value into word, divides total likes by number of posts for each individual day\n\t#Creates tuple with day of the week and average likes as values, then separates them into lists for the x and y axes\n\tx.append(key)\n\ty.append(value)\n\ndays2={} #Repeats process for Instagram posts\n\nrows2=cur.execute('Select likes, time_posted from Instagram')\nfor row in rows:\t\n\tlikes=row[0]\n\n\tdate_time=row[1] #Creates datetime object from Unix timestamp\n\tdate=datetime.datetime.fromtimestamp(date_time)\n\tday=datetime.datetime.weekday(date)\t\n\n\tif day not in days2:\n\t\tdays2[day]=[likes]\n\telse:\n\t\tdays2[day].append(likes)\n\nw=[]\nz=[]\n\nfor day in sorted(days2.keys()):\n\t(key, value)= (calendar.day_name[day], sum(days2[day])/len(days2[day]))\n\t\n\tw.append(key)\n\tz.append(value)\n\npy.sign_in('alkahan', 'vdr8fzFddzEuRqqABNzQ')\ntrace1 = {\n \"x\": x, #Appropriate values for days of the weeks and average likes loaded into trace object for Instagram and Facebook\n \"y\": y, \n \"name\": \"Facebook\", \n \"type\": \"bar\"\n}\n\ntrace2 = {\n \"x\": w, \n \"y\": z, \n \"name\": \"Instagram\", \n \"type\": \"bar\"\n}\n\ndata = Data([trace1, trace2])\nlayout = {\"barmode\": \"group\",\n'title':'Facebook VS Instagram Average Likes By Day',\n'xaxis':{'title':'Day of the Week'},\n'yaxis':{'title': 'Average Likes'}\n}\nfig = Figure(data=data, layout=layout)\n\nplot_url = py.plot(fig)\n\ncur.close()", "sub_path": "Final-Project/avg-likes-by-day.py", "file_name": "avg-likes-by-day.py", "file_ext": "py", "file_size_in_byte": 2403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sqlite3.connect", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "datetime.datetime.weekday", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 33, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "datetime.datetime.weekday", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 57, "usage_type": "attribute"}, {"api_name": "plotly.plotly.sign_in", "line_number": 62, "usage_type": "call"}, {"api_name": "plotly.plotly", "line_number": 62, "usage_type": "name"}, {"api_name": "plotly.plotly.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "plotly.plotly", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "484359244", "text": "from flask import render_template, flash, redirect, url_for\nfrom app import app\nfrom app.forms import LoginForm\nfrom flask_login import current_user, login_user\nfrom app.models import User\nfrom app.models import Scan\nfrom app.forms import StartScanForm\nfrom app.search import find_all_with_work_id, find_scan_details_on_elastics\nfrom flask_login import logout_user\nfrom flask_login import 
login_required\nfrom flask import request\nfrom werkzeug.urls import url_parse\nfrom core.scan_manager import ScanManager\nfrom app import db\n\n\n@app.route('/')\n@app.route('/index')\n@login_required\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    if current_user.is_authenticated:\n        return redirect(url_for('index'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(username=form.username.data).first()\n        if user is None or not user.check_password(form.password.data):\n            flash('Invalid username or password')\n            return redirect(url_for('login'))\n        login_user(user, remember=form.remember_me.data)\n        next_page = request.args.get('next')\n        if not next_page or url_parse(next_page).netloc != '':\n            next_page = url_for('index')\n        return redirect(next_page)\n    return render_template('login.html', title='Sign In', form=form)\n\n@app.route('/logout')\ndef logout():\n    logout_user()\n    return redirect(url_for('index'))\n\n\n@app.route('/scan_details/<scan_id>')\n@login_required\ndef scan_details(scan_id):\n    scan = Scan.query.filter_by(scan_id=scan_id).first_or_404()\n    # result, hit_count = find_all_with_work_id(scan_id)\n    statistics = find_scan_details_on_elastics(scan_id)\n\n    scan.completed_perc = round(statistics['host_count']['total'] * 100 / scan.ip_count, 2)\n    scan.status = 'Completed' if scan.completed_perc == 100.0 else 'Running'\n\n    # prepare for chart.js\n    labels = []\n    counts = []\n    for pair in statistics['top_ports']:\n        labels.append('port ' + str(pair['key']))\n        counts.append(pair['doc_count'])\n    statistics['top_ports'] = {\n        'labels': labels,\n        'counts': counts\n    }\n\n    # prepare for chart.js\n    labels = ['up', 'down']\n    counts = [statistics['host_count']['up'], statistics['host_count']['total'] - statistics['host_count']['up']]\n    statistics['host_count'] = {\n        'labels': labels,\n        'counts': counts\n    }\n    return render_template('scan_details.html', scan=scan, statistics=statistics)\n\n\n@app.route('/scan_list')\n@login_required\ndef scan_list():\n    scans = Scan.query.all()\n    return render_template('scan_list.html', scans=scans)\n\n\n@app.route('/start_scan', methods=['GET', 'POST'])\n@login_required\ndef start_scan():\n    form = StartScanForm()\n    SCANNER = 'ugly'\n    if form.validate_on_submit():\n        sm = ScanManager()\n        result = sm.send_to_scanners(scanner=SCANNER, host_string=form.ip.data, port_string=form.port.data, params_string=None)\n        # print(result)\n        if result is not None:\n            scan = Scan(scan_id=result['scan_id'], ip=form.ip.data, port=form.port.data, params=form.params.data,\n                        scanner=SCANNER, ip_count=result['ip_count'], owner=current_user)\n            db.session.add(scan)\n            db.session.commit()\n            flash('Scan is started with scan_id: ' + result['scan_id'] + ' ip_count: ' + str(result['ip_count']))\n            return redirect(url_for('scan_details', scan_id=result['scan_id']))\n        else:\n            flash('Unknown parameters')\n\n    elif request.method == 'GET':\n        form.ip.data = ''\n        form.port.data = ''\n    return render_template('start_scan.html', title='Start New Scan', form=form)\n", "sub_path": "webui/app/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 3743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 17, "usage_type": "call"}, {"api_name": "app.app", "line_number": 17, "usage_type": "name"}, {"api_name": 
"app.app.route", "line_number": 18, "usage_type": "call"}, {"api_name": "app.app", "line_number": 18, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "app.forms.LoginForm", "line_number": 28, "usage_type": "call"}, {"api_name": "app.models.User.query.filter_by", "line_number": 30, "usage_type": "call"}, {"api_name": "app.models.User.query", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.models.User", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 33, "usage_type": "call"}, {"api_name": "flask_login.login_user", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "werkzeug.urls.url_parse", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 24, "usage_type": "call"}, {"api_name": "app.app", "line_number": 24, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 44, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 41, "usage_type": "call"}, {"api_name": "app.app", "line_number": 41, "usage_type": "name"}, {"api_name": "app.models.Scan.query.filter_by", "line_number": 50, "usage_type": "call"}, {"api_name": "app.models.Scan.query", "line_number": 50, "usage_type": "attribute"}, {"api_name": "app.models.Scan", "line_number": 50, "usage_type": "name"}, {"api_name": "app.search.find_scan_details_on_elastics", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 47, "usage_type": "call"}, {"api_name": "app.app", "line_number": 47, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 48, "usage_type": "name"}, {"api_name": "app.models.Scan.query.all", "line_number": 81, "usage_type": "call"}, {"api_name": "app.models.Scan.query", "line_number": 81, "usage_type": "attribute"}, {"api_name": "app.models.Scan", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 82, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 78, "usage_type": "call"}, {"api_name": "app.app", "line_number": 78, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 79, "usage_type": "name"}, {"api_name": "app.forms.StartScanForm", "line_number": 88, "usage_type": "call"}, {"api_name": "core.scan_manager.ScanManager", "line_number": 91, 
"usage_type": "call"}, {"api_name": "app.models.Scan", "line_number": 95, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 96, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 97, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 97, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 97, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 98, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 98, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 107, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 85, "usage_type": "call"}, {"api_name": "app.app", "line_number": 85, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "472475902", "text": "import ConfigParser\nimport hashlib\nimport logging\nimport argparse\nimport pprint\nimport os, os.path\nfrom mediafire import MediaFireLib\nfrom retry import retry\nimport urllib2\nfrom os.path import expanduser\n\nlogging.basicConfig(level=logging.DEBUG)\n\nlog = logging.getLogger(__name__)\n\nconfig = ConfigParser.ConfigParser()\nconfigFile = os.path.join(os.path.expanduser(\"~\"),\".mediafire/config\")\nconfig.readfp(open(configFile))\n\nemail = config.get(\"Settings\",\"email\")\npassword = config.get(\"Settings\",\"password\")\napplicationid = config.get(\"Settings\",\"applicationid\")\napikey = config.get(\"Settings\",\"apikey\")\n\nmf = MediaFireLib(_userMail = email, _userPassword = password, _appID = applicationid, _apiKey = apikey)\nmf.user_getSessionToken()\n\ndef sha256sum(filename):\n sha256 = hashlib.sha256()\n with open(filename,'rb') as f: \n for chunk in iter(lambda: f.read(128*sha256.block_size), b''): \n sha256.update(chunk)\n return sha256.hexdigest()\n\n@retry(urllib2.URLError, tries=3)\ndef get_or_create_folder(folder_path):\n mf.user_getSessionToken()\n\n if not folder_path.startswith('/'):\n raise Exception(\"Folder path should start with /\") \n\n if folder_path == '/':\n # Root folder does not have a folder_key. 
Return empty string.\n return ''\n\n folder_pieces = folder_path.strip('/').split('/')\n\n folder_key = \"\"\n for piece in folder_pieces:\n result = mf.folder_getContent(folder_key = folder_key, content_type='folders')\n\n # Check if piece is in folders\n new_folder_key = None\n for folder in result['folder_content']['folders']:\n if folder['name'] == piece:\n new_folder_key = folder['folderkey']\n\n # If the folder already exists set the folder_key to the one found and continue deeper\n if new_folder_key:\n folder_key = new_folder_key\n else:\n log.debug(\"Creating folder %s under parentkey %s\" % (piece, folder_key))\n result = mf.folder_create(piece, parentKey = folder_key)\n folder_key = result['folderkey']\n\n return folder_key\n\n\ndef upload(args):\n\n if args.destination.endswith('/'):\n args.destination = args.destination.rstrip('/')\n\n # The upload command lets the user specify multiple files or directories\n for upload_file in args.files:\n\n # If the upload_file is a directory then recurse through all the files and upload them\n if os.path.isdir(upload_file):\n\n for root, _, files in os.walk(upload_file):\n for f in files:\n full_path = os.path.join(root, f)\n partial_path = os.path.join(root, f)[len(upload_file):] \n\n destination_path = args.destination + partial_path\n destination_folder_path = os.path.dirname(destination_path)\n\n folder_key = get_or_create_folder(destination_folder_path)\n\n basename = os.path.basename(full_path)\n \n should_upload = True\n\n if args.dryrun:\n should_upload = False\n\n # Check to see if the folder contains the file already\n result = mf.folder_getContent(folder_key = folder_key, content_type='files')\n\n files_uploaded = filter(lambda x: x['filename'] == basename, result['folder_content']['files'])\n\n if len(files_uploaded) > 0:\n\n uploaded_file = files_uploaded[0]\n if uploaded_file['hash'] == sha256sum(full_path):\n should_upload = False\n log.info(\"Skipping file %s\" % basename)\n\n if should_upload:\n log.info(\"Uploading file %s\" % basename)\n mf.upload_UploadFile(full_path, folderKey = folder_key)\n\n # If the upload_file is a file object\n if os.path.isfile(upload_file):\n pass\n\n\n\n# create the top-level parser\nparser = argparse.ArgumentParser(description='Mediafire commandline tools')\nsubparsers = parser.add_subparsers()\n\nparser.add_argument('--dryrun', action='store_true', help=\"Do a dry run without changing anything.\")\n\nupload_parser = subparsers.add_parser('upload')\nupload_parser.add_argument('upload', action='store_true', help='Upload a file or directory')\nupload_parser.add_argument('files', nargs='*')\nupload_parser.add_argument('destination')\nupload_parser.set_defaults(func=upload)\n\n# parse the args and call whatever function was selected\nargs = parser.parse_args()\nargs.func(args)\n", "sub_path": "src/mf.py", "file_name": "mf.py", "file_ext": "py", "file_size_in_byte": 4244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 17, "usage_type": 
"call"}, {"api_name": "mediafire.MediaFireLib", "line_number": 25, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 29, "usage_type": "call"}, {"api_name": "retry.retry", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib2.URLError", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "415661371", "text": "import argparse\nfrom time import perf_counter as clock\n\nimport dask\nimport time\nimport dask.array as da\nfrom distributed import Client, LocalCluster\nfrom distributed.utils import format_bytes\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--protocol\", choices=['ucx', 'tcp', 'inproc'],\n default=\"ucx\")\n parser.add_argument(\"-s\", \"--server\", default=None, help='server address.')\n parser.add_argument(\"-n\", \"--port\", default=\"13337\")\n\n return parser.parse_args(args)\n\n\ndef main(args=None):\n args = parse_args(args)\n\n if args.protocol == 'ucx':\n sched_str = \"ucx://\"+ args.server + \":\" + args.port\n client = Client(sched_str)\n else:\n kwargs = {'n_workers': 2, 'threads_per_worker': 40}\n kwargs['processes'] = args.protocol == 'tcp'\n cluster = LocalCluster(**kwargs)\n client = Client(cluster)\n\n print(f\"Connected to {client}\")\n N = 1_000_000\n P = 1_000\n X = da.random.uniform(size=(N, P), chunks=(N//100, P))\n print(format_bytes(X.nbytes))\n\n result = X.T.dot(X)\n start = clock()\n result.compute()\n stop = clock()\n print(result)\n print(f\"\\tTook {stop - start:0.2f}s\")\n time.sleep(10)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "benchmarks/old_tests/dask-dot-prod.py", "file_name": "dask-dot-prod.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "distributed.Client", "line_number": 26, "usage_type": "call"}, {"api_name": "distributed.LocalCluster", "line_number": 30, "usage_type": "call"}, {"api_name": "distributed.Client", "line_number": 31, "usage_type": "call"}, {"api_name": "dask.array.random.uniform", "line_number": 36, "usage_type": "call"}, {"api_name": "dask.array.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "dask.array", "line_number": 36, "usage_type": "name"}, {"api_name": "distributed.utils.format_bytes", "line_number": 37, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 40, "usage_type": "call"}, 
{"api_name": "time.perf_counter", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "316272705", "text": "from keras.models import Sequential, Model\r\nfrom keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\r\nfrom keras.optimizers import SGD, Adam, RMSprop\r\nfrom keras.layers.merge import concatenate\r\nimport matplotlib.pyplot as plt\r\nimport keras.backend as K\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport pickle\r\nimport os, cv2\r\n\r\nLABELS = ['Aeroplane', 'Bicycle', 'Bird', 'Boat', 'Bottle',\r\n 'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',\r\n 'Dog', 'Horse','Motorbike', 'Person', 'Pottedplant',\r\n 'Sheep', 'Sofa', 'Train', 'Tvmonitor']\r\n\r\nIMAGE_H, IMAGE_W = 416, 416\r\nGRID_H, GRID_W = 13 , 13\r\nBOX = 5\r\nCLASS = len(LABELS)\r\nTHRESHOLD = 0.3\r\nANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]\r\n\r\nNO_OBJECT_SCALE = 1.0\r\nOBJECT_SCALE = 5.0\r\nCOORD_SCALE = 1.0\r\nCLASS_SCALE = 1.0\r\n\r\nBATCH_SIZE = 6\r\nWARM_UP_BATCHES = 0\r\nTRUE_BOX_BUFFER = 50\r\n\r\n# the function to implement the orgnization layer (thanks to github.com/allanzelener/YAD2K)\r\ndef space_to_depth_x2(x):\r\n return tf.space_to_depth(x, block_size=2)\r\n\r\ndef get_model():\r\n input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))\r\n true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER , 4))\r\n\r\n model = Sequential()\r\n\r\n # Layer 1\r\n model.add(Conv2D(16, (3,3), strides=(1,1), padding='same', use_bias=False, input_shape=(416,416,3)))\r\n model.add(BatchNormalization())\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\n # Layer 2 - 5\r\n for i in range(0,4):\r\n model.add(Conv2D(32*(2**i), (3,3), strides=(1,1), padding='same', use_bias=False))\r\n model.add(BatchNormalization())\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\n # Layer 6\r\n model.add(Conv2D(512, (3,3), strides=(1,1), padding='same', use_bias=False))\r\n model.add(BatchNormalization())\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(1,1), padding='same'))\r\n\r\n # Layer 7 - 8\r\n for _ in range(0,2):\r\n model.add(Conv2D(1024, (3,3), strides=(1,1), padding='same', use_bias=False))\r\n model.add(BatchNormalization())\r\n model.add(LeakyReLU(alpha=0.1))\r\n\r\n # Layer 9\r\n model.add(Conv2D(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), kernel_initializer='he_normal'))\r\n model.add(Activation('linear'))\r\n model.add(Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS)))\r\n return model\r\n", "sub_path": "util/yolo/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 2747, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "tensorflow.space_to_depth", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 47, "usage_type": "call"}, 
{"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "512964772", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profilegen', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='userdetails',\n old_name='year',\n new_name='byear',\n ),\n migrations.AddField(\n model_name='userdetails',\n name='current',\n field=models.CharField(max_length=10000, null=True, blank=True),\n ),\n migrations.AddField(\n model_name='userdetails',\n name='previous',\n field=models.CharField(max_length=10000, null=True, blank=True),\n ),\n ]\n", "sub_path": "profilegen/migrations/0002_auto_20160518_1319.py", "file_name": "0002_auto_20160518_1319.py", "file_ext": "py", "file_size_in_byte": 748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RenameField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "175657228", "text": "import calendar\nimport datetime\n\nfrom freezegun import freeze_time\n\nfrom jam import Logger\nfrom jam import Storage\nfrom jam import Snapshot\nfrom jam import Collection\nfrom jam.backends import 
EphemeralBackend\n\n\nif __name__ == '__main__':\n logger = Logger(EphemeralBackend())\n storage = Storage(EphemeralBackend())\n snapshot = Snapshot(EphemeralBackend())\n collection = Collection(storage, logger, snapshot)\n\n ts2012 = calendar.timegm(datetime.datetime(year=2012, month=1, day=14).timetuple())\n ts2013 = calendar.timegm(datetime.datetime(year=2013, month=1, day=14).timetuple())\n ts2014 = calendar.timegm(datetime.datetime(year=2014, month=1, day=14).timetuple())\n\n with freeze_time('2012-01-14'):\n collection.create('key1', 'value20120114')\n\n with freeze_time('2013-01-14'):\n collection.create('key1', 'value20130114')\n snapshot = collection.snapshot()\n\n with freeze_time('2014-01-14'):\n collection.create('key1', 'value20140114')\n collection.create('2014key', 'value')\n\n frozen = collection.at_time(ts2012, Snapshot(EphemeralBackend()))\n assert frozen.read('key1').data == 'value20120114'\n try:\n key = frozen.read('2014key')\n except Exception:\n key = None\n\n assert key is None\n\n frozen = collection.at_time(ts2014, Snapshot(EphemeralBackend()))\n assert frozen.read('key1').data == 'value20140114'\n assert frozen.read('2014key').data == 'value'\n\n frozen = collection.at_time(ts2013, Snapshot(EphemeralBackend()))\n assert frozen.read('key1').data == 'value20130114'\n try:\n key = frozen.read('2014key')\n except Exception:\n key = None\n\n assert key is None\n\n collection = Collection(storage, logger, Snapshot(EphemeralBackend()), regenerate=False)\n collection.load_snapshot(snapshot)\n\n assert collection.read('key1').data == 'value20130114'\n try:\n key = collection.read('2014key')\n except Exception:\n key = None\n\n assert key is None\n\n collection.regenerate()\n\n assert collection.read('2014key').data == 'value'\n", "sub_path": "examples/timemachine.py", "file_name": "timemachine.py", "file_ext": "py", "file_size_in_byte": 2077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "jam.Logger", "line_number": 14, "usage_type": "call"}, {"api_name": "jam.backends.EphemeralBackend", "line_number": 14, "usage_type": "call"}, {"api_name": "jam.Storage", "line_number": 15, "usage_type": "call"}, {"api_name": "jam.backends.EphemeralBackend", "line_number": 15, "usage_type": "call"}, {"api_name": "jam.Snapshot", "line_number": 16, "usage_type": "call"}, {"api_name": "jam.backends.EphemeralBackend", "line_number": 16, "usage_type": "call"}, {"api_name": "jam.Collection", "line_number": 17, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 23, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 26, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 30, "usage_type": "call"}, {"api_name": "jam.Snapshot", "line_number": 34, "usage_type": "call"}, {"api_name": "jam.backends.EphemeralBackend", "line_number": 34, "usage_type": "call"}, {"api_name": "jam.Snapshot", "line_number": 43, "usage_type": "call"}, {"api_name": "jam.backends.EphemeralBackend", "line_number": 43, "usage_type": 
"call"}, {"api_name": "jam.Snapshot", "line_number": 47, "usage_type": "call"}, {"api_name": "jam.backends.EphemeralBackend", "line_number": 47, "usage_type": "call"}, {"api_name": "jam.Collection", "line_number": 56, "usage_type": "call"}, {"api_name": "jam.Snapshot", "line_number": 56, "usage_type": "call"}, {"api_name": "jam.backends.EphemeralBackend", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "574977969", "text": "from datetime import datetime\n\nfrom bson import ObjectId\nfrom flask import jsonify, request, current_app\nfrom flask_security import login_required, roles_accepted\n\nfrom app import app\nfrom models.mail import Mail\nfrom utils import common\n\n\n@app.route('/api/project//mailList', methods=['GET'])\n@login_required\ndef mail_list(project_id):\n total_num, mails = common.get_total_num_and_arranged_data(Mail, request.args, fuzzy_fields=['name'])\n return jsonify({'status': 'ok', 'data': {'totalNum': total_num, 'rows': mails}})\n\n\n@app.route('/api/project//addMail', methods=['POST'])\n@login_required\n@roles_accepted('admin', 'project')\ndef add_mail(project_id):\n try:\n request_data = request.get_json()\n request_data[\"status\"] = True\n request_data[\"projectId\"] = ObjectId(project_id)\n request_data[\"createAt\"] = datetime.utcnow()\n filtered_data = Mail.filter_field(request.get_json(), use_set_default=True)\n Mail.insert(filtered_data)\n return jsonify({'status': 'ok', 'data': '新增邮件成功'})\n except BaseException as e:\n current_app.logger.error(\"add_mail failed. - %s\" % str(e))\n return jsonify({'status': 'failed', 'data': '新增邮件失败 %s' % e})\n\n\n@app.route('/api/project//updateMail/', methods=['POST'])\n@login_required\n@roles_accepted('admin', 'project')\ndef update_mail(project_id, mail_id):\n try:\n request_data = request.get_json()\n request_data['lastUpdateTime'] = datetime.utcnow()\n filtered_data = Mail.filter_field(request_data)\n update_response = Mail.update({'_id': ObjectId(mail_id)}, {'$set': filtered_data})\n if update_response['n'] == 0:\n return jsonify({'status': 'failed', 'data': '未找到相应的更新数据!'})\n return jsonify({'status': 'ok', 'data': '更新成功'})\n except BaseException as e:\n current_app.logger.error(\"update_mail failed. 
- %s\" % str(e))\n return jsonify({'status': 'failed', 'data': '更新失败 %s' % e})\n", "sub_path": "backend/controllers/mail.py", "file_name": "mail.py", "file_ext": "py", "file_size_in_byte": 2056, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "utils.common.get_total_num_and_arranged_data", "line_number": 15, "usage_type": "call"}, {"api_name": "models.mail.Mail", "line_number": 15, "usage_type": "argument"}, {"api_name": "utils.common", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 16, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 12, "usage_type": "call"}, {"api_name": "app.app", "line_number": 12, "usage_type": "name"}, {"api_name": "flask_security.login_required", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "bson.ObjectId", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "models.mail.Mail.filter_field", "line_number": 28, "usage_type": "call"}, {"api_name": "models.mail.Mail", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "models.mail.Mail.insert", "line_number": 29, "usage_type": "call"}, {"api_name": "models.mail.Mail", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.current_app.logger.error", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 19, "usage_type": "call"}, {"api_name": "app.app", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_security.login_required", "line_number": 20, "usage_type": "name"}, {"api_name": "flask_security.roles_accepted", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}, {"api_name": "models.mail.Mail.filter_field", "line_number": 43, "usage_type": "call"}, {"api_name": "models.mail.Mail", "line_number": 43, "usage_type": "name"}, {"api_name": "models.mail.Mail.update", "line_number": 44, "usage_type": "call"}, {"api_name": "models.mail.Mail", "line_number": 44, "usage_type": "name"}, {"api_name": "bson.ObjectId", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.current_app.logger.error", "line_number": 49, "usage_type": "call"}, {"api_name": 
"flask.current_app.logger", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 50, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 36, "usage_type": "call"}, {"api_name": "app.app", "line_number": 36, "usage_type": "name"}, {"api_name": "flask_security.login_required", "line_number": 37, "usage_type": "name"}, {"api_name": "flask_security.roles_accepted", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "212206065", "text": "from piston.emitters import Emitter\nimport logging\nlog = logging.getLogger('console.debug')\n\nclass CSVEmitter(Emitter):\n def render(self,request):\n rows = self.construct()\n return \"\\n\".join([\n ','.join([str(col) for col in row]) for row in rows])\n#Emitter registered in duct_tape/__init__.py\nfrom django.utils import simplejson\nfrom django.core.serializers.json import DateTimeAwareJSONEncoder\n\nclass ExtJSONEmitter(Emitter):\n \"\"\"\n JSON emitter, understands timestamps, wraps result set in object literal\n for Ext JS compatibility\n \"\"\"\n def render(self, request):\n cb = request.GET.get('callback')\n\n totalCount = False\n if hasattr(self,'data') and hasattr(self.data,'__len__') and len(self.data)==2:\n totalCount,self.data= self.data\n ext_dict = {'success': True, 'data': self.construct()}\n if totalCount:\n ext_dict['totalCount'] = totalCount\n\n seria = simplejson.dumps(\n ext_dict, cls=DateTimeAwareJSONEncoder, ensure_ascii=False, indent=4)\n\n # Callback\n if cb:\n return '%s(%s)' % (cb, seria)\n\n return seria\n\n#Emitter registered in duct_tape/__init__.py\n", "sub_path": "duct_tape/api/emitters.py", "file_name": "emitters.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 3, "usage_type": "call"}, {"api_name": "piston.emitters.Emitter", "line_number": 5, "usage_type": "name"}, {"api_name": "piston.emitters.Emitter", "line_number": 14, "usage_type": "name"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 29, "usage_type": "name"}, {"api_name": "django.core.serializers.json.DateTimeAwareJSONEncoder", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "422096165", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# plt.style.use(\"ggplot\")\nplt.rcParams['axes.unicode_minus'] = False\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.figure(figsize=(8,6))\nplt.grid(linestyle = \"--\") #设置背景网格线为虚线\n\na = ([1,1,1,2,2.5,2.5,2.5,2.5,2.5,3,3,3,3,3,3,4,4,4,4,4,4,4,5,5,5,5,5,5,6,6,7,\\\n\t1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,\\\n\t3,3,4,4,5,5,5])\ndf = pd.DataFrame(a)\n# print(df)\n\n#用matplotlib来画出箱型图\nplt.boxplot(x=df.values,whis=1.5,meanline=False)\n# plt.savefig('box.svg',format='svg')\n#建议保存为svg格式,再用inkscape转为矢量图emf后插入word中\nplt.show()\n\n#用pandas自带的画图工具更快\n# df.boxplot()\n# plt.show()\n", "sub_path": "box.py", "file_name": "box.py", "file_ext": "py", "file_size_in_byte": 714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 6, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 6, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 7, "usage_type": 
"attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.boxplot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "474687750", "text": "\"\"\"Tests for flake8.plugins.manager.Plugin.\"\"\"\nimport argparse\nfrom unittest import mock\n\nimport pytest\n\nfrom flake8 import exceptions\nfrom flake8.options import manager as options_manager\nfrom flake8.plugins import manager\n\n\ndef test_load_plugin_fallsback_on_old_setuptools():\n \"\"\"Verify we fallback gracefully to on old versions of setuptools.\"\"\"\n entry_point = mock.Mock(spec=['load'])\n plugin = manager.Plugin('T000', entry_point)\n\n plugin.load_plugin()\n entry_point.load.assert_called_once_with()\n\n\ndef test_load_plugin_is_idempotent():\n \"\"\"Verify we use the preferred methods on new versions of setuptools.\"\"\"\n entry_point = mock.Mock(spec=['load'])\n plugin = manager.Plugin('T000', entry_point)\n\n plugin.load_plugin()\n plugin.load_plugin()\n plugin.load_plugin()\n entry_point.load.assert_called_once_with()\n\n\ndef test_load_plugin_catches_and_reraises_exceptions():\n \"\"\"Verify we raise our own FailedToLoadPlugin.\"\"\"\n entry_point = mock.Mock(spec=['load'])\n entry_point.load.side_effect = ValueError('Test failure')\n plugin = manager.Plugin('T000', entry_point)\n\n with pytest.raises(exceptions.FailedToLoadPlugin):\n plugin.load_plugin()\n\n\ndef test_load_noncallable_plugin():\n \"\"\"Verify that we do not load a non-callable plugin.\"\"\"\n entry_point = mock.Mock(spec=['load'])\n entry_point.load.return_value = mock.NonCallableMock()\n plugin = manager.Plugin('T000', entry_point)\n\n with pytest.raises(exceptions.FailedToLoadPlugin):\n plugin.load_plugin()\n entry_point.load.assert_called_once_with()\n\n\ndef test_plugin_property_loads_plugin_on_first_use():\n \"\"\"Verify that we load our plugin when we first try to use it.\"\"\"\n entry_point = mock.Mock(spec=['load'])\n plugin = manager.Plugin('T000', entry_point)\n\n assert plugin.plugin is not None\n entry_point.load.assert_called_once_with()\n\n\ndef test_execute_calls_plugin_with_passed_arguments():\n \"\"\"Verify that we pass arguments directly to the plugin.\"\"\"\n entry_point = mock.Mock(spec=['load'])\n plugin_obj = mock.Mock()\n plugin = manager.Plugin('T000', entry_point)\n plugin._plugin = plugin_obj\n\n plugin.execute('arg1', 'arg2', kwarg1='value1', kwarg2='value2')\n plugin_obj.assert_called_once_with(\n 'arg1', 'arg2', kwarg1='value1', kwarg2='value2'\n )\n\n # Extra assertions\n assert entry_point.load.called is False\n\n\ndef test_version_proxies_to_the_plugin():\n \"\"\"Verify that we pass arguments directly to the plugin.\"\"\"\n entry_point = mock.Mock(spec=['load'])\n plugin_obj = mock.Mock(spec_set=['version'])\n plugin_obj.version = 'a.b.c'\n plugin = manager.Plugin('T000', entry_point)\n plugin._plugin = plugin_obj\n\n assert plugin.version == 
'a.b.c'\n\n\ndef test_register_options():\n    \"\"\"Verify we call add_options on the plugin only if it exists.\"\"\"\n    # Set up our mocks and Plugin object\n    entry_point = mock.Mock(spec=['load'])\n    plugin_obj = mock.Mock(spec_set=['name', 'version', 'add_options',\n                                     'parse_options'])\n    option_manager = mock.MagicMock(spec=options_manager.OptionManager)\n    plugin = manager.Plugin('T000', entry_point)\n    plugin._plugin = plugin_obj\n\n    # Call the method we're testing.\n    plugin.register_options(option_manager)\n\n    # Assert that we call add_options\n    plugin_obj.add_options.assert_called_once_with(option_manager)\n\n\ndef test_register_options_checks_plugin_for_method():\n    \"\"\"Verify we call add_options on the plugin only if it exists.\"\"\"\n    # Set up our mocks and Plugin object\n    entry_point = mock.Mock(spec=['load'])\n    plugin_obj = mock.Mock(spec_set=['name', 'version', 'parse_options'])\n    option_manager = mock.Mock(spec=['register_plugin'])\n    plugin = manager.Plugin('T000', entry_point)\n    plugin._plugin = plugin_obj\n\n    # Call the method we're testing.\n    plugin.register_options(option_manager)\n\n    # Assert that we did not register the plugin\n    assert option_manager.register_plugin.called is False\n\n\ndef test_provide_options():\n    \"\"\"Verify we pass parsed options through to the plugin's parse_options.\"\"\"\n    # Set up our mocks and Plugin object\n    entry_point = mock.Mock(spec=['load'])\n    plugin_obj = mock.Mock(spec_set=['name', 'version', 'add_options',\n                                     'parse_options'])\n    option_values = argparse.Namespace(enable_extensions=[])\n    option_manager = mock.Mock()\n    plugin = manager.Plugin('T000', entry_point)\n    plugin._plugin = plugin_obj\n\n    # Call the method we're testing.\n    plugin.provide_options(option_manager, option_values, None)\n\n    # Assert that we call parse_options\n    plugin_obj.parse_options.assert_called_once_with(\n        option_manager, option_values, None\n    )\n\n\n@pytest.mark.parametrize('ignore_list, code, expected_list', [\n    (['E', 'W', 'F', 'C9'], 'W', ['E', 'F', 'C9']),\n    (['E', 'W', 'F'], 'C9', ['E', 'W', 'F']),\n])\ndef test_enable(ignore_list, code, expected_list):\n    \"\"\"Verify that enabling a plugin removes it from the ignore list.\"\"\"\n    options = mock.Mock(ignore=ignore_list)\n    optmanager = mock.Mock()\n    plugin = manager.Plugin(code, mock.Mock())\n\n    plugin.enable(optmanager, options)\n\n    assert options.ignore == expected_list\n\n\ndef test_enable_without_providing_parsed_options():\n    \"\"\"Verify that enabling a plugin removes it from the default ignore list.\"\"\"\n    optmanager = mock.Mock()\n    plugin = manager.Plugin('U4', mock.Mock())\n\n    plugin.enable(optmanager)\n\n    optmanager.remove_from_default_ignore.assert_called_once_with(['U4'])\n", "sub_path": "tests/unit/test_plugin.py", "file_name": "test_plugin.py", "file_ext": "py", "file_size_in_byte": 5540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest.mock.Mock", "line_number": 14, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 14, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 15, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 15, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 23, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 23, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 24, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 24, "usage_type": "name"}, 
{"api_name": "unittest.mock.Mock", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 34, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 36, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 36, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 38, "usage_type": "call"}, {"api_name": "flake8.exceptions.FailedToLoadPlugin", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flake8.exceptions", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 44, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 44, "usage_type": "name"}, {"api_name": "unittest.mock.NonCallableMock", "line_number": 45, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 45, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 46, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 46, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 48, "usage_type": "call"}, {"api_name": "flake8.exceptions.FailedToLoadPlugin", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flake8.exceptions", "line_number": 48, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 55, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 55, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 56, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 56, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 64, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 64, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 65, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 65, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 66, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 66, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 80, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 80, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 81, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 81, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 83, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 83, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 92, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 92, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 93, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 93, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 95, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 95, "usage_type": "name"}, {"api_name": "flake8.options.manager.OptionManager", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flake8.options.manager", "line_number": 95, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 96, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 96, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 109, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 109, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 110, 
"usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 110, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 111, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 111, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 112, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 112, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 125, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 125, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 126, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 126, "usage_type": "name"}, {"api_name": "argparse.Namespace", "line_number": 128, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 129, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 129, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 130, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 130, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 148, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 148, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 149, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 149, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 150, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 150, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 150, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 150, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 142, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 142, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 159, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 159, "usage_type": "name"}, {"api_name": "flake8.plugins.manager.Plugin", "line_number": 160, "usage_type": "call"}, {"api_name": "flake8.plugins.manager", "line_number": 160, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 160, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 160, "usage_type": "name"}]} +{"seq_id": "444208434", "text": "\"\"\"\nTo do:\n- split off application dependent code, as done for ZI_HDAWG8.py\n\nNotes:\n\n\nChangelog:\n\n20190113 WJV\n- started Changelog\n- addressed many warnings identified by PyCharm\n- started adding type annotations\n- split of stuff into _add_node_pars()\n- made some properties 'private'\n\n20190219 WJV\n- tagged some dead code with FIXM.\n\n20190219:\n- made _array_to_combined_vector_string() a @staticmethod\n\n20190417 WJV\n- merged branch 'develop' into 'feature/cc', changes:\n spec_mode_on\n spec_mode_off\n\n20190429 WJV\n- merged branch 'QCC_testing' into 'feature/cc', changes:\n load_default_settings(): awgs_0_dio_strobe_index changed from 31 (CCL) to 15 (QCC)\n\n20190612 WJV\n- merged branch 'QCC_testing' into 'feature/cc', changes:\n adds awg_sequence_acquisition_and_DIO_RED_test()\n\n20190618 WJV\n- merged branch 'develop' into 'feature/cc', changes:\n\n20190813 NH\n- merged branch 'develop' into 'feature/ZIupdateDrivers'\n- Updated driver to use new UHFQA nodes\n- Updated to support dynamic waveform upload properly. 
The AWG is configured when start() is called and the\n driver then chooses whether it is necessary to recompile the AWG program. The program will be recompiled\n if waveform lengths have changed. Otherwise, if waveforms have been updated they will just be downloaded\n directly to the instrument.\n\n\"\"\"\n\nimport time\nimport os\nimport logging\nimport numpy as np\nimport pycqed\n\nimport pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument as zibase\nfrom pycqed.utilities.general import check_keyboard_interrupt\n\nfrom qcodes.utils import validators\nfrom qcodes.utils.helpers import full_class\nfrom qcodes.instrument.parameter import ManualParameter\n\nlog = logging.getLogger(__name__)\n\n##########################################################################\n# Exceptions\n##########################################################################\n\n\nclass ziUHFQCSeqCError(Exception):\n \"\"\"Exception raised when the configured SeqC program does\n not match the structure needed for a given measurement in terms\n of number of samples, number of averages or the use of a delay.\"\"\"\n pass\n\n\nclass ziUHFQCHoldoffError(Exception):\n \"\"\"Exception raised when a holdoff error has occurred in either the\n input monitor or result logging unit. Increase the delay between triggers\n sent to these units to solve the problem.\"\"\"\n pass\n\nclass ziUHFQCDIOActivityError(Exception):\n \"\"\"Exception raised when insufficient activity is detected on the bits\n of the DIO to be used for controlling which qubits to measure.\"\"\"\n pass\n\nclass ziUHFQCDIOCalibrationError(Exception):\n \"\"\"Exception raised when the DIO calibration fails, meaning no signal\n delay can be found where no timing violations are detected.\"\"\"\n pass\n\n##########################################################################\n# Module level functions\n##########################################################################\n\n\ndef awg_sequence_acquisition_preamble():\n \"\"\"\n This function defines a standard AWG program preamble, which is used\n regardless of the specific acquisition mode. The preamble defines standard\n functionality of the user registers, which are used for dynamically\n controlling e.g. 
number of iterations in a loop, etc.\n The preamble also defines a standard way of selecting between triggering\n the readout units or the time-domain input monitor.\n \"\"\"\n preamble = \"\"\"\n// Reset error counter\nsetUserReg(4, 0);\n\n// Define standard variables\nvar loop_cnt = getUserReg(0);\nvar ro_mode = getUserReg(1);\nvar wait_dly = getUserReg(2);\nvar avg_cnt = getUserReg(3);\nvar ro_arm;\nvar ro_trig;\n\n// Configure readout mode\nif (ro_mode) {\n ro_arm = AWG_INTEGRATION_ARM;\n ro_trig = AWG_MONITOR_TRIGGER + AWG_INTEGRATION_ARM + AWG_INTEGRATION_TRIGGER;\n} else {\n ro_arm = AWG_INTEGRATION_ARM;\n ro_trig = AWG_INTEGRATION_ARM + AWG_INTEGRATION_TRIGGER;\n}\"\"\"\n return preamble\n\n\ndef array2vect(array, name):\n # this function cuts up arrays into several vectors of maximum length 1024 that are joined.\n # this is to avoid python crashes (was found to crash for vectors of\n # length> 1490)\n if len(array) > 1024:\n splitted_array = np.array_split(array, len(array)//1024)\n string_array = ['\\nvect(' + ','.join(['{:.8f}'.format(x)\n for x in sub_array]) + ')' for sub_array in splitted_array]\n return 'wave ' + name + ' = join(' + ','.join(string_array) + ');\\n'\n else:\n return 'wave ' + name + ' = ' + 'vect(' + ','.join(['{:.8f}'.format(x) for x in array]) + ');\\n'\n\n##########################################################################\n# Class\n##########################################################################\n\n\nclass UHFQC(zibase.ZI_base_instrument):\n \"\"\"\n This is the PycQED driver for the 1.8 Gsample/s UHFQA developed\n by Zurich Instruments.\n\n Requirements:\n Installation instructions for Zurich Instrument Libraries.\n 1. install ziPython 3.5/3.6 ucs4 19.05 for 64bit Windows from\n http://www.zhinst.com/downloads, https://people.zhinst.com/~niels/\n 2. 
upload the latest firmware to the UHFQA using the LabOne GUI\n \"\"\"\n\n # Define minimum required revisions\n MIN_FWREVISION = 63210\n MIN_FPGAREVISION = 63133\n\n # Define user registers\n USER_REG_LOOP_CNT = 0\n USER_REG_RO_MODE = 1\n USER_REG_WAIT_DLY = 2\n USER_REG_AVG_CNT = 3\n USER_REG_ERR_CNT = 4\n\n ##########################################################################\n # 'public' functions: device control\n ##########################################################################\n\n def __init__(self,\n name,\n device: str,\n interface: str = 'USB',\n address: str = '127.0.0.1',\n port: int = 8004,\n use_dio: bool = True,\n nr_integration_channels: int = 9,\n server: str = '',\n **kw) -> None:\n \"\"\"\n Input arguments:\n name: (str) name of the instrument\n device (str) the name of the device e.g., \"dev8008\"\n interface (str) the name of the interface to use ('1GbE' or 'USB')\n address (str) the host where the ziDataServer is running (for compatibility)\n port (int) the port to connect to for the ziDataServer (don't change)\n use_dio (bool) assert to enable the DIO interface\n nr_integration_channels (int) the number of integration channels to use (max 10)\n server: (str) the host where the ziDataServer is running (if not '' then used instead of address)\n \"\"\"\n t0 = time.time()\n\n # Override server with the old-style address argument\n if server == '':\n server = address\n\n # save some parameters\n self._nr_integration_channels = nr_integration_channels\n self._use_dio = use_dio\n\n # Used for keeping track of which nodes we are monitoring for data\n self._acquisition_nodes = []\n\n # The following members define the characteristics of the configured\n # AWG program\n self._reset_awg_program_features()\n\n # The actual codeword cases used in a given program\n self._cases = None\n\n # Used for extra DIO output to CC for debugging\n self._diocws = None\n\n # Holds the DIO calibration delay\n self._dio_calibration_delay = 0\n\n # Define parameters that should not be part of the snapshot\n self._params_to_exclude = set(['features_code', 'system_fwlog', 'system_fwlogenable'])\n\n # Our base class includes all the functionality needed to initialize the parameters\n # of the object. Those parameters are read from instrument-specific JSON files stored\n # in the zi_parameter_files folder.\n super().__init__(name=name, device=device, interface=interface,\n server=server, port=port, num_codewords=2**nr_integration_channels,\n **kw)\n\n # Disable dysfunctional parameters from snapshot\n self._params_to_exclude = set(['features_code', 'system_fwlog', 'system_fwlogenable'])\n\n # Set default waveform length to 20 ns at 1.8 GSa/s\n self._default_waveform_length = 32\n\n # Mask used for detecting codeword activity during DIO calibration\n self._dio_calibration_mask = None\n\n t1 = time.time()\n log.info(f'{self.devname}: Initialized UHFQC in {t1 - t0}s')\n\n ##########################################################################\n # Overriding Qcodes InstrumentBase methods\n ##########################################################################\n\n def snapshot_base(self, update: bool=False,\n params_to_skip_update=None,\n params_to_exclude=None):\n \"\"\"\n State of the instrument as a JSON-compatible dict.\n Args:\n update: If True, update the state by querying the\n instrument. If False, just use the latest values in memory.\n params_to_skip_update: List of parameter names that will be skipped\n in update even if update is True. 
This is useful if you have\n parameters that are slow to update but can be updated in a\n different way (as in the qdac)\n Returns:\n dict: base snapshot\n \"\"\"\n\n if params_to_exclude is None:\n params_to_exclude = self._params_to_exclude\n\n snap = {\n \"functions\": {name: func.snapshot(update=update)\n for name, func in self.functions.items()},\n \"submodules\": {name: subm.snapshot(update=update)\n for name, subm in self.submodules.items()},\n \"__class__\": full_class(self)\n }\n\n snap['parameters'] = {}\n for name, param in self.parameters.items():\n if params_to_exclude and name in params_to_exclude:\n pass\n elif params_to_skip_update and name in params_to_skip_update:\n update_par = False\n else:\n update_par = update\n try:\n snap['parameters'][name] = param.snapshot(update=update_par)\n except:\n logging.info(\"Snapshot: Could not update parameter: {}\".format(name))\n snap['parameters'][name] = param.snapshot(update=False)\n\n for attr in set(self._meta_attrs):\n if hasattr(self, attr):\n snap[attr] = getattr(self, attr)\n return snap\n\n ##########################################################################\n # Overriding ZI_base_instrument methods\n ##########################################################################\n\n def _check_devtype(self):\n if self.devtype != 'UHFQA':\n raise zibase.ziDeviceError(\n 'Device {} of type {} is not a UHFQA instrument!'.format(self.devname, self.devtype))\n\n def _check_options(self):\n \"\"\"\n Checks that the correct options are installed on the instrument.\n \"\"\"\n options = self.gets('features/options').split('\\n')\n if 'QA' not in options and 'QC' not in options:\n raise zibase.ziOptionsError(\n 'Device {} is missing the QA or QC option!'.format(self.devname))\n if 'AWG' not in options:\n raise zibase.ziOptionsError(\n 'Device {} is missing the AWG option!'.format(self.devname))\n\n def _check_awg_nr(self, awg_nr):\n \"\"\"\n Checks that the given AWG index is valid for the device.\n \"\"\"\n if (awg_nr != 0):\n raise zibase.ziValueError(\n 'Invalid AWG index of {} detected!'.format(awg_nr))\n\n def _check_versions(self):\n \"\"\"\n Checks that sufficient versions of the firmware are available.\n \"\"\"\n if self.geti('system/fwrevision') < UHFQC.MIN_FWREVISION:\n raise zibase.ziVersionError('Insufficient firmware revision detected! Need {}, got {}!'.format(\n UHFQC.MIN_FWREVISION, self.geti('system/fwrevision')))\n\n if self.geti('system/fpgarevision') < UHFQC.MIN_FPGAREVISION:\n raise zibase.ziVersionError('Insufficient FPGA revision detected! Need {}, got {}!'.format(\n UHFQC.MIN_FPGAREVISION, self.geti('system/fpgarevision')))\n\n def _num_channels(self):\n return 2\n\n def _add_extra_parameters(self) -> None:\n \"\"\"\n We add a few additional custom parameters on top of the ones defined in the device files. These are:\n qas_0_trans_offset_weightfunction - an offset correction parameter for all weight functions,\n this allows normalized calibration when performing cross-talk suppressed readout. The parameter\n is not actually used in this driver, but in some of the support classes that make use of the driver.\n AWG_file - allows the user to configure the AWG with a SeqC program from a specific file.\n Provided only because the old version of the driver had this parameter. 
It is discouraged to use\n it.\n wait_dly - a parameter that enables the user to set a delay in AWG clock cycles (4.44 ns) to be\n applied between when the AWG starts playing the readout waveform, and when it triggers the\n actual readout.\n cases - a parameter that can be used to define which combination of readout waveforms to actually\n download to the instrument. As the instrument has a limited amount of memory available, it is\n not currently possible to store all 1024 possible combinations of readout waveforms that would\n be required to address the maximum number of qubits supported by the instrument (10). Therefore,\n the 'cases' mechanism is used to reduce that number to the combinations actually needed by\n an experiment.\n dio_calibration_delay - the delay that is programmed on the DIO lines as part of the DIO calibration\n process in order for the instrument to reliably sample data from the CC. Can be used to detect\n unexpected changes in timing of the entire system. The parameter can also be used to force a specific\n delay to be used on the DIO although that is not generally recommended.\n \"\"\"\n super()._add_extra_parameters()\n\n # storing an offset correction parameter for all weight functions,\n # this allows normalized calibration when performing cross-talk suppressed\n # readout\n for i in range(self._nr_integration_channels):\n self.add_parameter(\n \"qas_0_trans_offset_weightfunction_{}\".format(i),\n unit='', # unit is adc value\n label='RO normalization offset',\n initial_value=0.0,\n docstring='an offset correction parameter for all weight functions, '\n 'this allows normalized calibration when performing cross-talk suppressed readout. The parameter '\n 'is not actually used in this driver, but in some of the support classes that make use of the driver.',\n parameter_class=ManualParameter)\n\n self.add_parameter(\n 'AWG_file',\n set_cmd=self._do_set_AWG_file,\n docstring='Configures the AWG with a SeqC program from a specific file. '\n 'Provided only for backwards compatibility. It is discouraged to use '\n 'this parameter unless you know what you are doing',\n vals=validators.Anything())\n\n self.add_parameter(\n 'wait_dly',\n set_cmd=self._set_wait_dly,\n get_cmd=self._get_wait_dly,\n unit='',\n label='AWG cycle delay',\n docstring='Configures a delay in AWG clock cycles (4.44 ns) to be '\n 'applied between when the AWG starts playing the readout waveform, and when it triggers the '\n 'actual readout.',\n vals=validators.Ints())\n\n self.add_parameter(\n 'cases',\n set_cmd=self._set_cases,\n get_cmd=self._get_cases,\n docstring='Configures which combination of readout waveforms to actually '\n 'download to the instrument. As the instrument has a limited amount of memory available, it is '\n 'not currently possible to store all 1024 possible combinations of readout waveforms that would '\n 'be required to address the maximum number of qubits supported by the instrument (10). Therefore, '\n 'the \\'cases\\' mechanism is used to reduce that number to the combinations actually needed by '\n 'an experiment. The parameter must be set to a list of integers. The list defines the codewords '\n 'to be handled by the AWG program. For example, setting the parameter to [1, 5, 7] would result in '\n 'an AWG program that handles only codewords 1, 5 and 7. 
When running, if the AWG receives a codeword '\n 'that is not part of this list, an error will be triggered.',\n vals=validators.Lists())\n\n self.add_parameter('dio_calibration_delay',\n set_cmd=self._set_dio_calibration_delay,\n get_cmd=self._get_dio_calibration_delay,\n unit='',\n label='DIO Calibration delay',\n docstring='Configures the internal delay in 300 MHz cycles (3.3 ns) '\n 'to be applied on the DIO interface in order to achieve reliable sampling '\n 'of the codewords. The valid range is 0 to 15.',\n vals=validators.Ints())\n\n def _codeword_table_preamble(self, awg_nr):\n \"\"\"\n Defines a snippet of code to use in the beginning of an AWG program in order to define the waveforms.\n The generated code depends on the instrument type. For the UHF-QA we simply define the raw waveforms.\n \"\"\"\n program = ''\n\n # If the program doesn't need waveforms, just return here\n if not self._awg_program_features['waves']:\n return program\n\n # If the program needs cases, but none are defined, flag it as an error\n if self._awg_program_features['cases'] and self._cases is None:\n raise zibase.ziConfigurationError(\n 'Missing definition of cases for AWG program!')\n\n wf_table = self._get_waveform_table(awg_nr)\n for dio_cw, (wf_l, wf_r) in enumerate(wf_table):\n csvname_l = self.devname + '_' + wf_l\n csvname_r = self.devname + '_' + wf_r\n program += 'wave {} = \"{}\";\\n'.format(\n wf_l, csvname_l)\n program += 'wave {} = \"{}\";\\n'.format(\n wf_r, csvname_r)\n return program\n\n ##########################################################################\n # 'public' overrides for ZI_base_instrument\n ##########################################################################\n\n def assure_ext_clock(self) -> None:\n \"\"\"\n Make sure the instrument is using an external reference clock\n \"\"\"\n # get source:\n # 1: external\n # 0: internal (commanded so, or because of failure to sync to external clock)\n source = self.system_extclk()\n if source == 1:\n return\n\n print('Switching to external clock. This could take a while!')\n while True:\n self.system_extclk(1)\n timeout = 10\n while timeout > 0:\n time.sleep(0.1)\n status = self.system_extclk()\n if status == 1: # synced\n break\n else: # sync failed\n timeout -= 0.1\n print('X', end='')\n if self.system_extclk() != 1:\n print(' Switching to external clock failed. 
Trying again.')\n else:\n break\n print('\\nDone')\n\n def load_default_settings(self, upload_sequence=True) -> None:\n # standard configurations adapted from Haendbaek's notebook\n\n # The averaging-count is used to specify how many times the AWG program\n # should run\n LOG2_AVG_CNT = 10\n\n # Load an AWG program\n if upload_sequence:\n self.awg_sequence_acquisition()\n\n # Setting the clock to external\n self.system_extclk(1)\n\n # Turn on both outputs\n self.sigouts_0_on(1)\n self.sigouts_1_on(1)\n\n # Set the output channels to 50 ohm\n self.sigouts_0_imp50(True)\n self.sigouts_1_imp50(True)\n\n # Configure the analog trigger input 1 of the AWG to assert on a rising\n # edge on Ref_Trigger 1 (front-panel of the instrument)\n self.awgs_0_triggers_0_rising(1)\n self.awgs_0_triggers_0_level(0.000000000)\n self.awgs_0_triggers_0_channel(2)\n\n # Configure the digital trigger to be a rising-edge trigger\n self.awgs_0_auxtriggers_0_slope(1)\n\n # Straight connection, signal input 1 to channel 1, signal input 2 to\n # channel 2\n\n self.qas_0_deskew_rows_0_cols_0(1.0)\n self.qas_0_deskew_rows_0_cols_1(0.0)\n self.qas_0_deskew_rows_1_cols_0(0.0)\n self.qas_0_deskew_rows_1_cols_1(1.0)\n\n # Configure the codeword protocol\n if self._use_dio:\n self.dios_0_mode(2) # QuExpress thresholds on DIO (mode == 2), AWG control of DIO (mode == 1)\n self.dios_0_drive(0x3) # Drive DIO bits 15 to 0\n self.dios_0_extclk(2) # 50 MHz clocking of the DIO\n self.awgs_0_dio_strobe_slope(0) # no edge, replaced by dios_0_extclk(2)\n self.awgs_0_dio_strobe_index(15) # NB: 15 for QCC (was 31 for CCL). Irrelevant now we use 50 MHz clocking\n self.awgs_0_dio_valid_polarity(2) # high polarity\n self.awgs_0_dio_valid_index(16)\n\n # No rotation on the output of the weighted integration unit, i.e. take\n # real part of result\n for i in range(0, self._nr_integration_channels):\n self.set('qas_0_rotations_{}'.format(i), 1.0 + 0.0j)\n # remove offsets to weight function\n self.set('qas_0_trans_offset_weightfunction_{}'.format(i), 0.0)\n\n # No cross-coupling in the matrix multiplication (identity matrix)\n self.reset_crosstalk_matrix()\n\n # disable correlation mode on all channels\n self.reset_correlation_params()\n\n # Configure the result logger to not do any averaging\n self.qas_0_result_length(1000)\n self.qas_0_result_averages(pow(2, LOG2_AVG_CNT))\n # result_logging_mode 2 => raw (IQ)\n self.qas_0_result_source(2)\n\n # The custom firmware will feed through the signals on Signal Input 1 to Signal Output 1 and Signal Input 2 to Signal Output 2\n # when the AWG is OFF. For most practical applications this is not really useful. We, therefore, disable the generation of\n # these signals on the output here.\n self.sigouts_0_enables_0(0)\n self.sigouts_0_enables_1(0)\n self.sigouts_1_enables_0(0)\n self.sigouts_1_enables_1(0)\n\n ##########################################################################\n # Private methods\n ##########################################################################\n\n def _reset_awg_program_features(self):\n \"\"\"\n Resets the self._awg_program_features to disable all features. The UHFQC can be configured with a number\n of application-specific AWG programs using this driver. However, all the programs share some characteristics that\n are described in the _awg_program_features dictionary. For example, all of the programs include a main loop\n that runs for a number of iterations given by a user register. This feature is indicated by the 'loop_cnt'\n item in the dictionary. 
In contrast, not all programs include an extra loop for the number of averages that\n should be done. Therefore, the 'avg_cnt' item in the dictionary is not automatically set. The driver\n uses these features to keep track of what the current AWG program can do. It then raises errors in case\n the user tries to do something that is not supported.\n \"\"\"\n self._awg_program_features = {\n 'loop_cnt': False,\n 'avg_cnt': False,\n 'wait_dly': False,\n 'waves': False,\n 'cases': False,\n 'diocws': False}\n\n def _set_dio_calibration_delay(self, value):\n # Sanity check the value\n if value < 0 or value > 15:\n raise zibase.ziValueError(\n 'Trying to set DIO calibration delay to invalid value! Expected value in range 0 to 15. Got {}.'.format(\n value))\n\n log.info('Setting DIO calibration delay to {}'.format(value))\n # Store the value\n self._dio_calibration_delay = value\n\n # And configure the delays\n self.setd('raw/dios/0/delay', self._dio_calibration_delay)\n\n def _get_dio_calibration_delay(self):\n return self._dio_calibration_delay\n\n def _set_wait_dly(self, value):\n self.set('awgs_0_userregs_{}'.format(UHFQC.USER_REG_WAIT_DLY), value)\n\n def _get_wait_dly(self):\n return self.get('awgs_0_userregs_{}'.format(UHFQC.USER_REG_WAIT_DLY))\n\n def _set_cases(self, value):\n # Generate error if we don't have an AWG program that supports cases\n if not self._awg_program_features['cases']:\n raise zibase.ziValueError(\n 'Trying to define cases for an AWG program that does not support them!')\n\n # Check against number of codewords\n if len(value) > self._num_codewords:\n raise zibase.ziValueError('Trying to define a number of cases ({}) greater than configured number of codewords ({})!'.format(\n len(value), self._num_codewords))\n\n self._cases = value\n self._cw_mask = 0\n for case in self._cases:\n self._cw_mask |= case\n\n if self._awg_program_features['diocws'] and self._diocws is None:\n raise zibase.ziValueError(\n 'AWG program defines DIO output, but no output values have been defined!')\n\n self._awg_program[0] = \\\n awg_sequence_acquisition_preamble() + \"\"\"\n// Mask for selecting our codeword bits\nconst CW_MASK = ({:08x} << 17);\n// Counts wrong codewords\nvar err_cnt = 0;\n\"\"\".format(self._cw_mask)\n\n if self._awg_program_features['diocws']:\n self._awg_program[0] += \\\n array2vect(self._diocws, \"diocws\") + \"\"\"\n// Loop once for each DIO codeword to output\nfor (cvar i = 0; i < {}; i = i + 1) {{\"\"\".format(len(self._diocws))\n else:\n self._awg_program[0] += \"\"\"\n// Loop for all measurements\nrepeat (loop_cnt) {\"\"\"\n\n self._awg_program[0] += \"\"\"\n waitDIOTrigger();\n // Get codeword and apply mask\n var cw = getDIOTriggered() & CW_MASK;\n // Generate waveforms based on codeword output\n switch (cw) {\"\"\"\n # Add each of the cases\n for case in self._cases:\n self._awg_program[0] += \"\"\"\n case 0x{:08x}: playWave({}, {});\"\"\".format(case << 17, zibase.gen_waveform_name(0, case), zibase.gen_waveform_name(1, case))\n\n # Add a default for ensuring we see something when the other cases fail\n self._awg_program[0] += \"\"\"\n default: playWave(ones(32), ones(32)); err_cnt += 1;\n }\n wait(wait_dly);\"\"\"\n\n if self._awg_program_features['diocws']:\n self._awg_program[0] += \"\"\"\n setDIO(diocws[i]);\n\"\"\"\n self._awg_program[0] += \"\"\"\n setTrigger(ro_trig);\n setTrigger(ro_arm);\n}\nwait(300);\nsetTrigger(0);\nsetUserReg(4, err_cnt);\"\"\"\n\n self._awg_needs_configuration[0] = True\n\n def _get_cases(self):\n return self._cases
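\n\n # Note added for clarity (illustrative, not part of the generated program):\n # for cases = [1, 5, 7], _set_cases() above emits a SeqC dispatch of roughly\n # this shape:\n #\n # waitDIOTrigger();\n # var cw = getDIOTriggered() & CW_MASK; // CW_MASK = (1|5|7) << 17\n # switch (cw) {\n # case 0x00020000: playWave(...); // waveforms for case 1\n # case 0x000a0000: playWave(...); // waveforms for case 5\n # case 0x000e0000: playWave(...); // waveforms for case 7\n # default: playWave(ones(32), ones(32)); err_cnt += 1;\n # }\n\n def 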
_get_waveform_table(self, awg_nr: int) -> list:\n \"\"\"\n Returns the waveform table.\n\n The waveform table determines the mapping of waveforms to DIO codewords.\n The index of the table corresponds to the DIO codeword.\n The entry is a tuple of waveform names.\n\n Example:\n [\"wave_ch7_cw000\", \"wave_ch8_cw000\",\n \"wave_ch7_cw001\", \"wave_ch8_cw001\",\n \"wave_ch7_cw002\", \"wave_ch8_cw002\"]\n\n The waveform table generated depends on the awg_nr and the codeword\n protocol.\n \"\"\"\n ch = awg_nr*2\n wf_table = []\n if self.cases() is not None:\n for case in self.cases():\n wf_table.append((zibase.gen_waveform_name(ch, case),\n zibase.gen_waveform_name(ch+1, case)))\n return wf_table\n\n ##########################################################################\n # 'public' functions\n ##########################################################################\n\n def clock_freq(self):\n return 1.8e9\n\n ##########################################################################\n # 'public' functions: utility\n ##########################################################################\n\n def reset_acquisition_params(self):\n log.info('Setting user registers to 0')\n for i in range(16):\n self.set('awgs_0_userregs_{}'.format(i), 0)\n\n self.reset_crosstalk_matrix()\n self.reset_correlation_params()\n self.reset_rotation_params()\n\n def reset_crosstalk_matrix(self):\n self.upload_crosstalk_matrix(np.eye(10))\n\n def reset_correlation_params(self):\n for i in range(10):\n self.set('qas_0_correlations_{}_enable'.format(i), 0)\n self.set('qas_0_correlations_{}_source'.format(i), 0)\n for i in range(10):\n self.set('qas_0_thresholds_{}_correlation_enable'.format(i), 0)\n self.set('qas_0_thresholds_{}_correlation_source'.format(i), 0)\n\n def reset_rotation_params(self):\n for i in range(10):\n self.set('qas_0_rotations_{}'.format(i), 1+1j)\n\n ##########################################################################\n # 'public' functions: generic AWG/waveform support\n ##########################################################################\n\n def load_awg_program_from_file(self, filename) -> None:\n \"\"\"\n Loads an awg sequence onto the UHFQA from a text file.\n File needs to obey formatting specified in the manual.\n Only provided for backwards compatibility purposes.\n \"\"\"\n print(filename)\n with open(filename, 'r') as awg_file:\n self._awg_program[0] = awg_file.read()\n self._awg_needs_configuration[0] = True\n\n def _do_set_AWG_file(self, filename) -> None:\n self.load_awg_program_from_file('UHFLI_AWG_sequences/'+filename)\n\n def awg_file(self, filename) -> None:\n \"\"\"Only provided for backwards compatibility purposes.\"\"\"\n self.load_awg_program_from_file(filename)\n\n def awg_update_waveform(self, index, data) -> None:\n raise NotImplementedError(\n 'Method not implemented! 
Please use the corresponding waveform parameters \\'wave_chN_cwM\\' to update waveforms!')\n\n ##########################################################################\n # 'public' functions: acquisition support\n ##########################################################################\n\n def acquisition(self, samples=100, averages=1, acquisition_time=0.010, timeout=10,\n channels=(0, 1), mode='rl') -> None:\n self.timeout(timeout)\n self.acquisition_initialize(samples, averages, channels, mode)\n data = self.acquisition_poll(samples, True, acquisition_time)\n self.acquisition_finalize()\n\n return data\n\n def acquisition_initialize(self, samples, averages, channels=(0, 1),\n mode='rl') -> None:\n # Define the channels to use and subscribe to them\n self._acquisition_nodes = []\n\n # Loop counter of AWG\n loop_cnt = samples\n\n # Make some checks on the configured AWG program\n if samples > 1 and not self._awg_program_features['loop_cnt']:\n raise ziUHFQCSeqCError(\n 'Trying to acquire {} samples using an AWG program that does not use \\'loop_cnt\\'.'.format(samples))\n\n if averages > 1 and not self._awg_program_features['avg_cnt']:\n # Adjust the AWG loop counter according to the configured program\n loop_cnt *= averages\n\n if mode == 'rl':\n for c in channels:\n path = self._get_full_path(\n 'qas/0/result/data/{}/wave'.format(c))\n self._acquisition_nodes.append(path)\n self.subs(path)\n # Enable automatic readout\n self.qas_0_result_reset(1)\n self.qas_0_result_enable(1)\n self.qas_0_result_length(samples)\n self.qas_0_result_averages(averages)\n ro_mode = 0\n else:\n for c in channels:\n path = self._get_full_path(\n 'qas/0/monitor/inputs/{}/wave'.format(c))\n self._acquisition_nodes.append(path)\n self.subs(path)\n # Enable automatic readout\n self.qas_0_monitor_reset(1)\n self.qas_0_monitor_enable(1)\n self.qas_0_monitor_length(samples)\n self.qas_0_monitor_averages(averages)\n ro_mode = 1\n\n self.set('awgs_0_userregs_{}'.format(UHFQC.USER_REG_LOOP_CNT), loop_cnt)\n self.set('awgs_0_userregs_{}'.format(UHFQC.USER_REG_RO_MODE), ro_mode)\n self.set('awgs_0_userregs_{}'.format(UHFQC.USER_REG_AVG_CNT), averages)\n if self.wait_dly() > 0 and not self._awg_program_features['wait_dly']:\n raise ziUHFQCSeqCError(\n 'Trying to use a delay of {} using an AWG program that does not use \\'wait_dly\\'.'.format(self.wait_dly()))\n self.set('awgs_0_userregs_{}'.format(UHFQC.USER_REG_WAIT_DLY), self.wait_dly())\n self.subs(self._get_full_path('auxins/0/sample'))\n\n # Generate more dummy data\n self.auxins_0_averaging(8)\n\n def acquisition_arm(self, single=True) -> None:\n # time.sleep(0.01)\n self.awgs_0_single(single)\n self.start()\n\n def acquisition_poll(self, samples, arm=True,\n acquisition_time=0.010) -> None:\n \"\"\"\n Polls the UHFQC for data.\n\n Args:\n samples (int): the expected number of samples\n arm (bool): if true arms the acquisition, disable when you\n need synchronous acquisition with some external dev\n acquisition_time (float): time in sec between polls? 
# TODO check with Niels H\n timeout (float): time in seconds before timeout Error is raised.\n\n \"\"\"\n data = {k: [] for k, dummy in enumerate(self._acquisition_nodes)}\n\n # Start acquisition\n if arm:\n self.acquisition_arm()\n\n # Acquire data\n gotem = [False]*len(self._acquisition_nodes)\n accumulated_time = 0\n\n while accumulated_time < self.timeout() and not all(gotem):\n dataset = self.poll(acquisition_time)\n\n # Enable the user to interrupt long (or buggy) acquisitions\n try:\n check_keyboard_interrupt()\n except KeyboardInterrupt as e:\n # Finalize acquisition before raising exception\n self.acquisition_finalize()\n raise e\n\n for n, p in enumerate(self._acquisition_nodes):\n if p in dataset:\n for v in dataset[p]:\n data[n] = np.concatenate((data[n], v['vector']))\n if len(data[n]) >= samples:\n gotem[n] = True\n accumulated_time += acquisition_time\n\n if not all(gotem):\n self.acquisition_finalize()\n for n, _c in enumerate(self._acquisition_nodes):\n if n in data:\n print(\"\\t: Channel {}: Got {} of {} samples\".format(\n n, len(data[n]), samples))\n raise TimeoutError(\"Error: Didn't get all results!\")\n\n return data\n\n def acquisition_finalize(self) -> None:\n self.stop()\n\n for p in self._acquisition_nodes:\n self.unsubs(p)\n self.unsubs(self._get_full_path('auxins/0/sample'))\n\n def check_errors(self) -> None:\n \"\"\"\n Checks the instrument for errors. As the UHFQA does not yet support the same error\n stack as the HDAWG instruments we do the checks by reading specific nodes\n in the system and then constructing similar messages as on the HDAWG.\n \"\"\"\n # If this is the first time we are called, log the detected errors, but don't raise\n # any exceptions\n if self._errors is None:\n raise_exceptions = False\n self._errors = {}\n else:\n raise_exceptions = True\n\n # Stores the errors before processing\n errors = {'messages': []}\n\n # Now check for errors from the different functional units\n if self.qas_0_result_errors() > 0:\n errors['messages'].append({\n 'code': 'RESHOLDOFF',\n 'severity': 1.0,\n 'count': self.qas_0_result_errors(),\n 'message': 'Holdoff error detected when reading Quantum Analyzer Results! '\n 'Increase the delay between trigger signals from the AWG!'})\n\n if self.qas_0_monitor_errors() > 0:\n errors['messages'].append({\n 'code': 'MONHOLDOFF',\n 'severity': 1.0,\n 'count': self.qas_0_monitor_errors(),\n 'message': 'Holdoff error detected when reading Quantum Analyzer Input Monitor! 
'\n 'Increase the delay between trigger signals from the AWG!'})\n\n # Check optional codeword-based errors\n if self._awg_program_features['cases'] and self.get('awgs_0_userregs_{}'.format(UHFQC.USER_REG_ERR_CNT)) > 0:\n errors['messages'].append({\n 'code': 'DIOCWCASE',\n 'severity': 1.0,\n 'count': self.get('awgs_0_userregs_{}'.format(UHFQC.USER_REG_ERR_CNT)),\n 'message': 'AWG detected invalid codewords not covered by the configured cases!'})\n\n # Asserted in case errors were found\n found_errors = False\n\n # Go through the errors and update our structure, raise exceptions if anything changed\n for m in errors['messages']:\n code = m['code']\n count = m['count']\n severity = m['severity']\n message = m['message']\n\n if not raise_exceptions:\n self._errors[code] = {\n 'count': count,\n 'severity': severity,\n 'message': message}\n log.warning('{}: Code {}: \"{}\" ({})'.format(\n self.devname, code, message, severity))\n else:\n # Optionally skip the error completely\n if code in self._errors_to_ignore:\n continue\n\n # Check if there are new errors\n if code not in self._errors or count > self._errors[code]['count']:\n log.error('{}: {} ({}/{})'.format(self.devname,\n message, code, severity))\n found_errors = True\n\n if code in self._errors:\n self._errors[code]['count'] = count\n else:\n self._errors[code] = {\n 'count': count,\n 'severity': severity,\n 'message': message}\n\n # if found_errors:\n # raise zibase.ziRuntimeError('Errors detected during run-time!')\n\n def clear_errors(self) -> None:\n self.qas_0_result_reset(1)\n self.qas_0_monitor_reset(1)\n\n ##########################################################################\n # 'public' functions: DIO support\n ##########################################################################\n\n def plot_dio(self, bits=range(32), line_length=64):\n data = self.getv('awgs/0/dio/data')\n zibase.plot_timing_diagram(data, bits, line_length)\n\n ##########################################################################\n # 'public' functions: weight & matrix function helpers\n ##########################################################################\n\n def prepare_SSB_weight_and_rotation(self, IF,\n weight_function_I=0,\n weight_function_Q=1,\n rotation_angle=0,\n length=4096 / 1.8e9,\n scaling_factor=1) -> None:\n \"\"\"\n Sets default integration weights for SSB modulation; beware, this does not\n load pulses or prepare the UHFQC program to do data acquisition\n \"\"\"\n trace_length = 4096\n tbase = np.arange(0, trace_length / 1.8e9, 1 / 1.8e9)\n cosI = np.array(np.cos(2 * np.pi * IF * tbase + rotation_angle))\n sinI = np.array(np.sin(2 * np.pi * IF * tbase + rotation_angle))\n if length < 4096 / 1.8e9:\n max_sample = int(length * 1.8e9)\n # setting the samples beyond the length to 0\n cosI[max_sample:] = 0\n sinI[max_sample:] = 0\n self.set('qas_0_integration_weights_{}_real'.format(weight_function_I),\n np.array(cosI))\n self.set('qas_0_integration_weights_{}_imag'.format(weight_function_I),\n np.array(sinI))\n self.set('qas_0_rotations_{}'.format(\n weight_function_I), scaling_factor*(1.0 + 1.0j))\n if weight_function_Q is not None:\n self.set('qas_0_integration_weights_{}_real'.format(weight_function_Q),\n np.array(sinI))\n self.set('qas_0_integration_weights_{}_imag'.format(weight_function_Q),\n np.array(cosI))\n self.set('qas_0_rotations_{}'.format(\n weight_function_Q), scaling_factor*(1.0 - 1.0j))
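\n\n # Usage sketch (added for clarity; the instrument handle 'uhf' is hypothetical):\n #\n # uhf.prepare_SSB_weight_and_rotation(IF=50e6)\n #\n # loads cosine/sine integration weights at a 50 MHz intermediate frequency into\n # weight functions 0 and 1 and sets the matching rotations (1+1j and 1-1j) so\n # that the demodulated quadratures appear in the real part of the results.\n\n def prepare_DSB_weight_and_rotation(self, IF, weight_function_I=0, weight_function_Q=1) -> None:\n trace_length = 4096\n tbase = 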
np.arange(0, trace_length/1.8e9, 1/1.8e9)\n cosI = np.array(np.cos(2 * np.pi*IF*tbase))\n sinI = np.array(np.sin(2 * np.pi*IF*tbase))\n self.set('qas_0_integration_weights_{}_real'.format(weight_function_I),\n np.array(cosI))\n self.set('qas_0_integration_weights_{}_real'.format(weight_function_Q),\n np.array(sinI))\n # the factor 2 is needed so that scaling matches SSB downconversion\n self.set('qas_0_rotations_{}'.format(weight_function_I), 2.0 + 0.0j)\n self.set('qas_0_rotations_{}'.format(weight_function_Q), 2.0 + 0.0j)\n\n def upload_crosstalk_matrix(self, matrix) -> None:\n \"\"\"\n Upload parameters for the 10*10 crosstalk suppression matrix.\n\n This method uses the 'qas_0_crosstalk_rows_*_cols_*' nodes.\n \"\"\"\n for i in range(np.shape(matrix)[0]): # looping over the rows\n for j in range(np.shape(matrix)[1]): # looping over the columns\n self.set('qas_0_crosstalk_rows_{}_cols_{}'.format(\n j, i), matrix[i][j])\n\n def download_crosstalk_matrix(self, nr_rows=10, nr_cols=10):\n \"\"\"\n Download parameters of the 10*10 crosstalk suppression matrix.\n\n This method uses the 'qas_0_crosstalk_rows_*_cols_*' nodes.\n \"\"\"\n matrix = np.zeros([nr_rows, nr_cols])\n for i in range(np.shape(matrix)[0]): # looping over the rows\n for j in range(np.shape(matrix)[1]): # looping over the columns\n matrix[i][j] = self.get(\n 'qas_0_crosstalk_rows_{}_cols_{}'.format(j, i))\n return matrix\n\n ##########################################################################\n \"\"\"\n 'public' functions: sequencer functions\n Before acquisition can take place one of \"awg_sequence_acquisition_and_\"\n has to be called. These take care that the right program is uploaded.\n The variants are:\n awg_sequence_acquisition\n start acquisition after receiving a trigger, play no pulse\n awg_sequence_acquisition_and_pulse\n start acquisition after receiving a trigger,\n play the specified pulse\n awg_sequence_acquisition_and_pulse_SSB\n start acquisition after receiving a trigger,\n play an SSB pulse based on specified parameters\n awg_sequence_acquisition_and_DIO_triggered_pulse\n start acquisition after receiving a DIO trigger,\n play the pulse specified by the received DIO codeword\n cases argument specifies what codewords are supported.\n awg_sequence_acquisition_and_DIO_RED_test\n special DIO acquisition for testing real time error correction.\n \"\"\"\n ##########################################################################\n\n def awg_sequence_acquisition_and_DIO_triggered_pulse(\n self, Iwaves=None, Qwaves=None, cases=None, acquisition_delay=0, timeout=5) -> None:\n \"\"\"\n Loads the program for DIO acquisition on the AWG of the UHFQC.\n\n Arguments:\n Iwaves list of I waveforms (arrays) used (historical).\n Qwaves list of Q waveforms (arrays) used (historical).\n cases list of cases to include in the program.\n\n Uploads and compiles the AWG sequencer program.\n \"\"\"\n # setting the acquisition delay samples\n delay_samples = int(acquisition_delay*1.8e9/8)\n self.wait_dly(delay_samples)
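\n\n # Note added for clarity: acquisition_delay is given in seconds, while\n # wait_dly counts AWG clock cycles of 8 samples at 1.8 GSa/s (~4.44 ns\n # each), hence the conversion factor of 1.8e9/8 above.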
\n\n # If no cases are defined, then we simply create all possible cases\n if cases is None:\n cases = np.arange(self._num_codewords)\n else:\n if len(cases) > self._num_codewords:\n raise zibase.ziConfigurationError('More cases ({}) defined than available codewords ({})!'.format(\n len(cases), self._num_codewords))\n\n # There is probably a more efficient way of doing this\n for case in cases:\n if (case < 0) or (case >= self._num_codewords):\n raise zibase.ziConfigurationError(\n 'Case {} is out of range defined by the available codewords ({})!'.format(case, self._num_codewords))\n\n # Sanity check on the parameters\n if Iwaves is not None and (len(Iwaves) != len(cases)):\n raise ziUHFQCSeqCError(\n 'Number of I channel waveforms ({}) does not match number of cases ({})!'.format(len(Iwaves), len(cases)))\n\n if Qwaves is not None and (len(Qwaves) != len(cases)):\n raise ziUHFQCSeqCError(\n 'Number of Q channel waveforms ({}) does not match number of cases ({})!'.format(len(Qwaves), len(cases)))\n\n # Sanity check on I channel waveforms\n if Iwaves is not None:\n for i, Iwave in enumerate(Iwaves):\n if np.max(Iwave) > 1.0 or np.min(Iwave) < -1.0:\n raise KeyError(\n \"exceeding AWG range for I channel, all values should be within +/-1\")\n if len(Iwave) > 16384:\n raise KeyError(\n \"exceeding max AWG wave length of 16384 samples for I channel, trying to upload {} samples\".format(len(Iwave)))\n\n # Update waveform table\n self.set(zibase.gen_waveform_name(0, cases[i]), Iwave)\n\n # Sanity check on Q channel waveforms\n if Qwaves is not None:\n for i, Qwave in enumerate(Qwaves):\n if np.max(Qwave) > 1.0 or np.min(Qwave) < -1.0:\n raise KeyError(\n \"exceeding AWG range for Q channel, all values should be within +/-1\")\n if len(Qwave) > 16384:\n raise KeyError(\n \"exceeding max AWG wave length of 16384 samples for Q channel, trying to upload {} samples\".format(len(Qwave)))\n\n # Update waveform table\n self.set(zibase.gen_waveform_name(1, cases[i]), Qwave)\n\n # Define the behavior of our program\n self._reset_awg_program_features()\n self._awg_program_features['loop_cnt'] = True\n self._awg_program_features['wait_dly'] = True\n self._awg_program_features['waves'] = True\n self._awg_program_features['cases'] = True\n\n # Updating cases will cause our AWG program to update\n self.cases(cases)
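\n\n # Usage sketch (added for clarity; the waveform arrays I0, I1, Q0, Q1 and\n # all values are hypothetical):\n #\n # uhf.awg_sequence_acquisition_and_DIO_triggered_pulse(\n # Iwaves=[I0, I1], Qwaves=[Q0, Q1], cases=[0, 1],\n # acquisition_delay=200e-9)\n #\n # uploads two readout waveform pairs and programs the AWG to play the pair\n # selected by the incoming DIO codeword.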
\"exceeding max AWG wave length of 16384 samples for Q channel, trying to upload {} samples\".format(len(Qwave)))\n\n # Check the we have sufficient codewords defined\n if self._num_codewords < 1:\n raise zibase.ziConfigurationError(\n 'Insufficient number of codewords defined! Need at least 1 codeword.')\n\n # Configure the actual waveforms\n if Iwave is not None:\n self.set(zibase.gen_waveform_name(0, 0), Iwave)\n\n if Qwave is not None:\n self.set(zibase.gen_waveform_name(1, 0), Qwave)\n\n # Configure the delay\n self.set('awgs_0_userregs_{}'.format(UHFQC.USER_REG_WAIT_DLY),\n int(acquisition_delay*1.8e9/8))\n\n delay_string = \"\"\"\n wait(wait_dly);\n\"\"\"\n\n playWave_string = \"\"\"\n playWave({}, {});\n \"\"\".format(zibase.gen_waveform_name(0, 0), zibase.gen_waveform_name(1, 0))\n\n if dig_trigger:\n loop_start = \"\"\"\nrepeat (loop_cnt) {\n waitDigTrigger(1, 1);\n\"\"\"\n else:\n loop_start = \"\"\"\nrepeat (loop_cnt) {\n\"\"\"\n loop_end = \"\"\"\n setTrigger(ro_trig);\n setTrigger(ro_arm);\n waitWave();\n wait(4000);\n}\nsetTrigger(0);\n\"\"\"\n\n self._reset_awg_program_features()\n self._awg_program_features['loop_cnt'] = True\n self._awg_program_features['wait_dly'] = True\n self._awg_program_features['waves'] = True\n\n self._awg_program[0] = \\\n awg_sequence_acquisition_preamble() + \\\n loop_start + \\\n playWave_string + \\\n delay_string + \\\n loop_end\n\n self._awg_needs_configuration[0] = True\n\n def awg_sequence_acquisition(self):\n self._reset_awg_program_features()\n self._awg_program_features['loop_cnt'] = True\n\n self._awg_program[0] = awg_sequence_acquisition_preamble() + \"\"\"\nrepeat (loop_cnt) {\n waitDigTrigger(1, 1);\n setTrigger(ro_trig);\n setTrigger(ro_arm);\n}\nsetTrigger(0);\n\"\"\"\n # Reset delay\n self.wait_dly(0)\n self._awg_needs_configuration[0] = True\n\n def awg_sequence_acquisition_and_pulse_SSB(\n self, f_RO_mod, RO_amp, RO_pulse_length, acquisition_delay, dig_trigger=True) -> None:\n f_sampling = 1.8e9\n samples = RO_pulse_length*f_sampling\n array = np.arange(int(samples))\n sinwave = RO_amp * np.sin(2 * np.pi*array*f_RO_mod/f_sampling)\n coswave = RO_amp * np.cos(2 * np.pi*array*f_RO_mod/f_sampling)\n Iwave = (coswave+sinwave) / np.sqrt(2)\n Qwave = (coswave-sinwave) / np.sqrt(2)\n self.awg_sequence_acquisition_and_pulse(\n Iwave, Qwave, acquisition_delay, dig_trigger=dig_trigger)\n\n def spec_mode_on(self, acq_length=1/1500, IF=20e6, ro_amp=0.1, wint_length=2**14) -> None:\n self._reset_awg_program_features()\n self._awg_program_features['loop_cnt'] = True\n self._awg_program_features['avg_cnt'] = True\n self._awg_program_features['waves'] = True\n\n # Reset delay\n self.wait_dly(0)\n\n # Check the we have sufficient codewords defined\n if self._num_codewords < 1:\n raise zibase.ziConfigurationError(\n 'Insufficient number of codewords defined! 
\n\n def spec_mode_on(self, acq_length=1/1500, IF=20e6, ro_amp=0.1, wint_length=2**14) -> None:\n self._reset_awg_program_features()\n self._awg_program_features['loop_cnt'] = True\n self._awg_program_features['avg_cnt'] = True\n self._awg_program_features['waves'] = True\n\n # Reset delay\n self.wait_dly(0)\n\n # Check that we have sufficient codewords defined\n if self._num_codewords < 1:\n raise zibase.ziConfigurationError(\n 'Insufficient number of codewords defined! Need at least 1 codeword.')\n\n # Define number of samples\n N = 16\n\n # Define alpha parameter\n alpha = 0.2\n\n # Define support parameters\n a0 = (1-alpha)/2\n a1 = 1/2\n a2 = alpha/2\n\n # Generate window function\n w = a0 - \\\n a1 * np.cos(2 * np.pi * np.arange(N)/(N-1)) + \\\n a2 * np.cos(4 * np.pi * np.arange(N)/(N-1))\n\n # Configure the actual waveforms\n self.set(zibase.gen_waveform_name(0, 0), w)\n self.set(zibase.gen_waveform_name(1, 0), w)\n\n playWave_string = \"\"\"\n playWave({}, {});\n \"\"\".format(zibase.gen_waveform_name(0, 0), zibase.gen_waveform_name(1, 0))\n\n wait_string = \"\"\"\n waitQAResultTrigger();\n wait(16);\n \"\"\"\n\n self._awg_program[0] = awg_sequence_acquisition_preamble() + \"\"\"\nrepeat (avg_cnt) {\n var wait_time = 0;\n\n repeat(loop_cnt) {\n wait_time = wait_time + 1;\n setTrigger(ro_trig);\n setTrigger(ro_arm);\n wait(wait_time);\n\"\"\" + playWave_string + wait_string + \"\"\"\n }\n}\nsetTrigger(0);\n\"\"\"\n\n # Also added by us\n self.awgs_0_outputs_0_mode(1)\n self.awgs_0_outputs_1_mode(1)\n\n # setting the internal oscillator to the IF\n self.oscs_0_freq(IF)\n\n self.sigouts_0_on(1)\n self.sigouts_1_on(1)\n\n # QuExpress thresholds on DIO (mode == 2), AWG control of DIO (mode == 1)\n self.dios_0_mode(2)\n # Drive DIO bits 31 to 16\n self.dios_0_drive(0xc)\n\n # setting the integration path to use the oscillator instead of\n # integration functions. Should be done before modifying the length.\n self.qas_0_integration_mode(1)\n\n self.qas_0_deskew_rows_0_cols_0(1.0)\n self.qas_0_deskew_rows_0_cols_1(0.0)\n self.qas_0_deskew_rows_1_cols_0(0.0)\n self.qas_0_deskew_rows_1_cols_1(1.0)\n self.qas_0_integration_length(wint_length)\n self.qas_0_delay(0)\n\n # Copy from the manual\n self.qas_0_rotations_0(1.0 + 0.0j)\n self.qas_0_rotations_1(0.0 + 1.0j)\n\n for i in range(0, 10):\n for j in range(0, 10):\n self.set('qas_0_crosstalk_rows_{0}_cols_{1}'.format(\n i, j), 1.0*(i == j))\n\n # Configure some thresholds\n for i in range(0, 10):\n self.set('qas_0_thresholds_{}_level'.format(i), 0.01)\n\n # Also added by us\n # result_source 0 => lin_trans readout (includes crosstalk corr)\n self.qas_0_result_source(0)\n self.qas_0_result_enable(1)\n self.qas_0_result_statistics_enable(0)\n\n self._awg_needs_configuration[0] = True\n\n def spec_mode_off(self) -> None:\n # Resetting to regular mode\n # changing int length\n self.qas_0_integration_mode(0)\n\n # Default settings copied\n self.qas_0_rotations_0(1.0 + 0.0j)\n self.qas_0_rotations_1(1.0 + 0.0j)\n\n # setting to DSB by default\n self.qas_0_deskew_rows_0_cols_0(1.0)\n self.qas_0_deskew_rows_0_cols_1(0.0)\n self.qas_0_deskew_rows_1_cols_0(0.0)\n self.qas_0_deskew_rows_1_cols_1(1.0)\n\n # switching off the modulation tone\n self.awgs_0_outputs_0_mode(0)\n self.awgs_0_outputs_1_mode(0)\n\n def plot_dio_snapshot(self, bits=range(32)):\n zibase.plot_timing_diagram(self.getv('awgs/0/dio/data'), bits, 64)\n\n ##########################################################################\n # 'public' functions: print overview helpers\n ##########################################################################\n\n def print_correlation_overview(self):\n msg = '\\tCorrelations overview \\n'\n for i in range(10):\n enabled = self.get('qas_0_correlations_{}_enable'.format(i))\n source = self.get('qas_0_correlations_{}_source'.format(i))\n msg += \"Correlations {}, enabled: {} \\tsource: {}\\n\".format(\n i, enabled, source)\n msg += '\\n\\tThresholded correlations overview \\n'\n for i in range(10):\n enabled = 
self.get(\n 'qas_0_thresholds_{}_correlation_enable'.format(i))\n source = self.get(\n 'qas_0_thresholds_{}_correlation_source'.format(i))\n msg += \"Thresholds correlation {}, enabled: {} \\tsource: {}\\n\".format(\n i, enabled, source)\n print(msg)\n\n def print_deskew_overview(self):\n msg = '\\tDeskew overview \\n'\n\n deskew_mat = np.zeros((2, 2))\n for i in range(2):\n for j in range(2):\n deskew_mat[i, j] = self.get(\n 'qas_0_deskew_rows_{}_cols_{}'.format(i, j))\n msg += 'Deskew matrix: \\n'\n msg += str(deskew_mat)\n print(msg)\n\n def print_crosstalk_overview(self):\n msg = '\\tCrosstalk overview \\n'\n msg += 'Bypass crosstalk: {} \\n'.format(self.qas_0_crosstalk_bypass())\n\n crosstalk_mat = np.zeros((10, 10))\n for i in range(10):\n for j in range(10):\n crosstalk_mat[i, j] = self.get(\n 'qas_0_crosstalk_rows_{}_cols_{}'.format(i, j))\n msg += 'Crosstalk matrix: \\n'\n print(msg)\n print(crosstalk_mat)\n\n def print_integration_overview(self):\n msg = '\\tIntegration overview \\n'\n msg += 'Integration mode: {} \\n'.format(\n self.qas_0_integration_mode())\n for i in range(10):\n msg += 'Integration source {}: {}\\n'.format(\n i, self.get('qas_0_integration_sources_{}'.format(i)))\n print(msg)\n\n def print_rotations_overview(self):\n msg = '\\tRotations overview \\n'\n for i in range(10):\n msg += 'Rotations {}: {}\\n'.format(\n i, self.get('qas_0_rotations_{}'.format(i)))\n print(msg)\n\n def print_thresholds_overview(self):\n msg = '\\t Thresholds overview \\n'\n for i in range(10):\n msg += 'Threshold {}: {}\\n'.format(\n i, self.get('qas_0_thresholds_{}_level'.format(i)))\n print(msg)\n\n def print_user_regs_overview(self):\n msg = '\\t User registers overview \\n'\n user_reg_funcs = ['']*16\n user_reg_funcs[0] = 'Loop count'\n user_reg_funcs[1] = 'Readout mode'\n user_reg_funcs[2] = 'Wait delay'\n user_reg_funcs[3] = 'Average count'\n user_reg_funcs[4] = 'Error count'\n\n for i in range(16):\n msg += 'User reg {}: \\t{}\\t({})\\n'.format(\n i, self.get('awgs_0_userregs_{}'.format(i)), user_reg_funcs[i])\n print(msg)\n\n def print_overview(self):\n \"\"\"\n Print a readable overview of relevant parameters of the UHFQC.\n\n N.B. 
This overview is not complete, but combines different\n print helpers\n \"\"\"\n self.print_correlation_overview()\n self.print_crosstalk_overview()\n self.print_deskew_overview()\n self.print_integration_overview()\n self.print_rotations_overview()\n self.print_thresholds_overview()\n self.print_user_regs_overview()\n\n ##########################################################################\n # DIO calibration functions\n ##########################################################################\n\n def _ensure_activity(self, awg_nr, timeout=5, verbose=False):\n \"\"\"\n Record DIO data and test whether there is activity on the bits activated in the DIO protocol for the given AWG.\n \"\"\"\n if verbose: print(\"Testing DIO activity for AWG {}\".format(awg_nr))\n\n vld_mask = 1 << self.geti('awgs/{}/dio/valid/index'.format(awg_nr))\n vld_polarity = self.geti('awgs/{}/dio/valid/polarity'.format(awg_nr))\n strb_mask = (1 << self.geti('awgs/{}/dio/strobe/index'.format(awg_nr)))\n strb_slope = self.geti('awgs/{}/dio/strobe/slope'.format(awg_nr))\n\n # Make sure the DIO calibration mask is configured\n if self._dio_calibration_mask is None:\n raise ValueError('DIO calibration bit mask not defined.')\n\n mask_value = self._dio_calibration_mask\n cw_mask = mask_value << 17\n\n for i in range(timeout):\n valid = True\n\n data = self.getv('awgs/0/dio/data')\n if data is None:\n raise zibase.ziValueError('Failed to get DIO snapshot!')\n\n vld_activity = 0\n strb_activity = 0\n cw_activity = 0\n for d in data:\n cw_activity |= (d & cw_mask)\n vld_activity |= (d & vld_mask)\n strb_activity |= (d & strb_mask)\n\n if cw_activity != cw_mask:\n print(\"Did not see all codeword bits toggle! Got 0x{:08x}, expected 0x{:08x}.\".format(cw_activity, cw_mask))\n valid = False\n\n if vld_polarity != 0 and vld_activity != vld_mask:\n print(\"Did not see valid bit toggle!\")\n valid = False\n\n if strb_slope != 0 and strb_activity != strb_mask:\n print(\"Did not see valid bit toggle!\")\n valid = False\n\n if valid:\n return True\n\n return False\n\n def _get_awg_dio_data(self, awg):\n data = self.getv('awgs/' + str(awg) + '/dio/data')\n ts = len(data)*[0]\n cw = len(data)*[0]\n for n, d in enumerate(data):\n ts[n] = d >> 10\n cw[n] = (d & ((1 << 10)-1))\n return (ts, cw)\n\n def _find_valid_delays(self, awg_nr, repetitions=1, verbose=False):\n \"\"\"Finds valid DIO delay settings for a given AWG by testing all allowed delay settings for timing violations on the\n configured bits. 
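`_ensure_activity` above decides whether the DIO interface is alive by OR-accumulating every snapshot sample against a bit mask and demanding that each masked bit was seen high at least once. The check reduces to a small pure function; the snapshot below is a made-up example, not real DIO data:

```python
def masked_bits_all_active(samples, mask):
    # OR-accumulate the samples; the accumulated activity equals the
    # mask only if every masked bit went high at least once.
    activity = 0
    for d in samples:
        activity |= d & mask
    return activity == mask

snapshot = [0b01, 0b00, 0b01]  # bit 0 toggles, bit 1 never does
assert masked_bits_all_active(snapshot, 0b01)
assert not masked_bits_all_active(snapshot, 0b11)
```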
In addition, it compares the recorded DIO codewords to an expected sequence to make sure that no\n codewords are sampled incorrectly.\"\"\"\n if verbose: print(\" Finding valid delays\")\n\n vld_mask = 1 << self.geti('awgs/{}/dio/valid/index'.format(awg_nr))\n vld_polarity = self.geti('awgs/{}/dio/valid/polarity'.format(awg_nr))\n strb_mask = (1 << self.geti('awgs/{}/dio/strobe/index'.format(awg_nr)))\n strb_slope = self.geti('awgs/{}/dio/strobe/slope'.format(awg_nr))\n\n # Make sure the DIO calibration mask is configured\n if self._dio_calibration_mask is None:\n raise ValueError('DIO calibration bit mask not defined.')\n\n mask_value = self._dio_calibration_mask\n cw_mask = mask_value << 17\n\n combined_mask = cw_mask\n if vld_polarity != 0:\n combined_mask |= vld_mask\n if strb_slope != 0:\n combined_mask |= strb_mask\n if verbose: print(\" Using a mask value of 0x{:08x}\".format(combined_mask))\n\n valid_delays= []\n for delay in range(16):\n if verbose: print(' Testing delay {}'.format(delay))\n self.setd('raw/dios/0/delay', delay)\n time.sleep(1)\n valid_sequence = True\n for awg in [0]:\n error_timing = self.geti('raw/dios/0/error/timing')\n if error_timing & combined_mask != 0:\n valid_sequence = False\n\n if valid_sequence:\n valid_delays.append(delay)\n\n return set(valid_delays)\n\n ##########################################################################\n # DIO calibration functions for *CC*\n # FIXME: should not be in driver\n ##########################################################################\n\n def _prepare_CCL_dio_calibration(self, CCL, feedline=1, verbose=False):\n \"\"\"Configures a CCL with a default program that generates data suitable for DIO calibration.\n Also starts the program.\"\"\"\n cs_filepath = os.path.join(pycqed.__path__[0],\n 'measurement',\n 'openql_experiments',\n 'output', 'cs.txt')\n\n opc_filepath = os.path.join(pycqed.__path__[0],\n 'measurement',\n 'openql_experiments',\n 'output', 'qisa_opcodes.qmap')\n\n CCL.control_store(cs_filepath)\n CCL.qisa_opcode(opc_filepath)\n\n test_fp = os.path.abspath(os.path.join(pycqed.__path__[0],\n '..',\n 'examples','CCLight_example',\n 'qisa_test_assembly','calibration_cws_ro.qisa'))\n\n # Start the CCL with the program configured above\n CCL.eqasm_program(test_fp)\n CCL.start()\n\n # Set the DIO calibration mask to enable 5 bit measurement\n if feedline == 1:\n self._dio_calibration_mask = 0x1f\n elif feedline == 2:\n self._dio_calibration_mask = 0x3\n else:\n raise ValueError('Invalid feedline {} selected for calibration.'.format(feedline))\n\n def _prepare_QCC_dio_calibration(self, QCC, verbose=False):\n \"\"\"Configures a QCC with a default program that generates data suitable for DIO calibration. 
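`_find_valid_delays` above sweeps all 16 DIO input-delay taps, lets the hardware error counters settle, and keeps the taps whose timing-error register shows no violation on the masked bits. A generic sketch of that sweep; `set_delay` and `read_timing_error` are stand-ins for the driver's `setd('raw/dios/0/delay', ...)` and `geti('raw/dios/0/error/timing')` calls:

```python
import time

def scan_dio_delays(set_delay, read_timing_error, mask, settle=1.0):
    # Try each of the 16 delay taps; a tap is valid when no timing
    # violation is flagged on any bit covered by the combined mask.
    valid = set()
    for delay in range(16):
        set_delay(delay)
        time.sleep(settle)  # give the error counters time to refresh
        if read_timing_error() & mask == 0:
            valid.add(delay)
    return valid
```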
Also starts the QCC.\"\"\"\n\n cs_filepath = os.path.join(pycqed.__path__[0],\n 'measurement',\n 'openql_experiments',\n 's17', 'cs.txt')\n\n opc_filepath = os.path.join(pycqed.__path__[0],\n 'measurement',\n 'openql_experiments',\n 's17', 'qisa_opcodes.qmap')\n\n QCC.control_store(cs_filepath)\n QCC.qisa_opcode(opc_filepath)\n\n test_fp = os.path.abspath(os.path.join(pycqed.__path__[0],\n '..',\n 'examples','QCC_example',\n 'qisa_test_assembly','ro_calibration.qisa'))\n\n # Start the QCC with the program configured above\n QCC.stop()\n QCC.eqasm_program(test_fp)\n QCC.start()\n\n # Set the DIO calibration mask to enable 9 bit measurement\n self._dio_calibration_mask = 0x1ff\n\n def _prepare_HDAWG8_dio_calibration(self, HDAWG, verbose=False):\n \"\"\"Configures an HDAWG with a default program that generates data suitable for DIO calibration. Also starts the HDAWG.\"\"\"\n program = '''\nvar A = 0xffff0000;\nvar B = 0x00000000;\n\nwhile (1) {\n setDIO(A);\n wait(2);\n setDIO(B);\n wait(2);\n}\n'''\n HDAWG.configure_awg_from_string(0, program)\n HDAWG.seti('awgs/0/enable', 1)\n\n self._dio_calibration_mask = 0x7fff\n\n def calibrate_CC_dio_protocol(self, CC, feedline=None, verbose=False, repetitions=1):\n log.info('Calibrating DIO delays')\n if verbose: print(\"Calibrating DIO delays\")\n if feedline is None:\n raise ziUHFQCDIOCalibrationError('No feedline specified for calibration')\n\n CC_model = CC.IDN()['model']\n if 'QCC' in CC_model:\n self._prepare_QCC_dio_calibration(\n QCC=CC, verbose=verbose)\n elif 'CCL' in CC_model:\n self._prepare_CCL_dio_calibration(\n CCL=CC, feedline=feedline, verbose=verbose)\n elif 'HDAWG8' in CC_model:\n self._prepare_HDAWG8_dio_calibration(HDAWG=CC, verbose=verbose)\n elif 'cc' in CC_model:\n # expected_sequence = self._prepare_CC_dio_calibration(\n # CC=CC, verbose=verbose)\n return\n else:\n raise ValueError('CC model ({}) not recognized.'.format(CC_model))\n\n # Make sure the configuration is up-to-date\n self.assure_ext_clock()\n\n for awg in [0]:\n if not self._ensure_activity(awg, verbose=verbose):\n raise ziUHFQCDIOActivityError('No or insufficient activity found on the DIO bits associated with AWG {}'.format(awg))\n\n valid_delays = self._find_valid_delays(awg, repetitions, verbose=verbose)\n if len(valid_delays) == 0:\n raise ziUHFQCDIOCalibrationError('DIO calibration failed! 
No valid delays found')\n\n min_valid_delay = min(valid_delays)\n # Heuristics to get the 'best' delay in a sequence\n if (min_valid_delay+1) in valid_delays and (min_valid_delay+2) in valid_delays:\n min_valid_delay = min_valid_delay + 1\n\n # Print information\n if verbose: print(\" Valid delays are {}\".format(valid_delays))\n if verbose: print(\" Setting delay to {}\".format(min_valid_delay))\n\n # And configure the delays\n self._set_dio_calibration_delay(min_valid_delay)\n\n # Clear all detected errors (caused by DIO timing calibration)\n self.clear_errors()\n\n", "sub_path": "pycqed/instrument_drivers/physical_instruments/ZurichInstruments/UHFQuantumController.py", "file_name": "UHFQuantumController.py", "file_ext": "py", "file_size_in_byte": 70685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 134, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ZI_base_instrument", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 146, "usage_type": "name"}, {"api_name": "time.time", "line_number": 194, "usage_type": "call"}, {"api_name": "time.time", "line_number": 239, "usage_type": "call"}, {"api_name": "qcodes.utils.helpers.full_class", "line_number": 270, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 284, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziDeviceError", "line_number": 298, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 298, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziOptionsError", "line_number": 307, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 307, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziOptionsError", "line_number": 310, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 310, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziValueError", "line_number": 318, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 318, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziVersionError", "line_number": 326, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 326, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziVersionError", "line_number": 330, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 330, "usage_type": "name"}, {"api_name": "qcodes.instrument.parameter.ManualParameter", "line_number": 373, "usage_type": "name"}, 
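The calibration routine above then picks a tap from the valid set with a small heuristic: take the smallest valid delay, and step one tap further into the window when at least two more consecutive taps are valid, so the sampling point does not sit right on a timing edge. As a pure function:

```python
def pick_dio_delay(valid_delays):
    # Smallest valid tap, nudged one step inward when the window is
    # wide enough (the same heuristic as calibrate_CC_dio_protocol).
    if not valid_delays:
        raise ValueError('No valid delays found')
    best = min(valid_delays)
    if best + 1 in valid_delays and best + 2 in valid_delays:
        best += 1
    return best

assert pick_dio_delay({5, 6, 7, 8}) == 6  # wide window: move off the edge
assert pick_dio_delay({5, 6}) == 5        # narrow window: keep the minimum
```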
{"api_name": "qcodes.utils.validators.Anything", "line_number": 381, "usage_type": "call"}, {"api_name": "qcodes.utils.validators", "line_number": 381, "usage_type": "name"}, {"api_name": "qcodes.utils.validators.Ints", "line_number": 392, "usage_type": "call"}, {"api_name": "qcodes.utils.validators", "line_number": 392, "usage_type": "name"}, {"api_name": "qcodes.utils.validators.Lists", "line_number": 407, "usage_type": "call"}, {"api_name": "qcodes.utils.validators", "line_number": 407, "usage_type": "name"}, {"api_name": "qcodes.utils.validators.Ints", "line_number": 417, "usage_type": "call"}, {"api_name": "qcodes.utils.validators", "line_number": 417, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziConfigurationError", "line_number": 432, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 432, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 465, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziValueError", "line_number": 580, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 580, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziValueError", "line_number": 603, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 603, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziValueError", "line_number": 608, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 608, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziValueError", "line_number": 617, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 617, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 647, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 647, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 692, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 692, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 693, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 693, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 717, "usage_type": "call"}, {"api_name": "pycqed.utilities.general.check_keyboard_interrupt", "line_number": 857, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 866, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.plot_timing_diagram", 
"line_number": 979, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 979, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 996, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 997, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 997, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 997, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 998, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 998, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 998, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 1005, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1007, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1012, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1014, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1020, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1021, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 1021, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1021, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 1022, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 1022, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1022, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 1024, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1026, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1037, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1038, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1048, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1049, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1050, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1098, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziConfigurationError", "line_number": 1101, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1101, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziConfigurationError", "line_number": 1107, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1107, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 1122, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1122, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1130, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1130, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 1135, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1135, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1143, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1143, "usage_type": 
"name"}, {"api_name": "numpy.max", "line_number": 1190, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1190, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1194, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1194, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziConfigurationError", "line_number": 1208, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1208, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1213, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1213, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1216, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1216, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1228, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1228, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 1282, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 1283, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1283, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 1284, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1284, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 1285, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1286, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziConfigurationError", "line_number": 1301, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1301, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 1317, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1317, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 1317, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 1318, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1318, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 1318, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1321, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1321, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1322, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1322, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.gen_waveform_name", "line_number": 1326, "usage_type": "call"}, {"api_name": 
"pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1326, "usage_type": "name"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.plot_timing_diagram", "line_number": 1415, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1415, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 1441, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1454, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument.ziValueError", "line_number": 1542, "usage_type": "call"}, {"api_name": "pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument", "line_number": 1542, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 1607, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1627, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1627, "usage_type": "attribute"}, {"api_name": "pycqed.__path__", "line_number": 1627, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1632, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1632, "usage_type": "attribute"}, {"api_name": "pycqed.__path__", "line_number": 1632, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 1640, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1640, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1640, "usage_type": "call"}, {"api_name": "pycqed.__path__", "line_number": 1640, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1660, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1660, "usage_type": "attribute"}, {"api_name": "pycqed.__path__", "line_number": 1660, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1665, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1665, "usage_type": "attribute"}, {"api_name": "pycqed.__path__", "line_number": 1665, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 1673, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1673, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1673, "usage_type": "call"}, {"api_name": "pycqed.__path__", "line_number": 1673, "usage_type": "attribute"}]} +{"seq_id": "492414165", "text": "import jinja2\nimport os\nimport webapp2\nfrom google.appengine.api import users\nfrom google.appengine.api import app_identity\nfrom google.appengine.api import mail\nfrom google.appengine.ext import ndb\nimport time\n\njinja_environment = jinja2.Environment(loader=\n jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\n\nclass Human(ndb.Model):\n tors = ndb.StringProperty(required=True)\n name = ndb.StringProperty(required=True)\n year = ndb.StringProperty(required=True)\n school = ndb.StringProperty(required=True)\n major = ndb.StringProperty(required=True)\n email = ndb.StringProperty(required=True)\n subject = ndb.StringProperty(repeated=True)\n description = ndb.StringProperty(required=True)\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n human_query = Human.query()\n human_query = human_query.filter(Human.email == user.email())\n human_data = human_query.fetch()\n if human_data:\n self.redirect('/homepage')\n else:\n greeting = ('Welcome, %s! 
(sign out)' %\n (user.nickname(), users.create_logout_url('/')))\n self.response.out.write('%s' % greeting)\n template = jinja_environment.get_template('templates/register.html')\n self.response.write(template.render())\n else:\n dictionary = {\n \"sign_in_url\": '\"Sign' %\n users.create_login_url('/')\n }\n template = jinja_environment.get_template('templates/signinpage.html')\n self.response.write(template.render(dictionary))\n\n def post(self):\n user = users.get_current_user()\n human1 = Human(tors=self.request.get('tors'),name=self.request.get('name'), year=self.request.get('year'), school=self.request.get('school'), major=self.request.get('major'), email=user.email(), subject=self.request.get_all('subject'), description=self.request.get('description'))\n human1.put()\n human_query = Human.query()\n human_query = human_query.filter(Human.email == user.email())\n human_data = human_query.fetch()\n self.redirect('/homepage')\n\n\n\n\nclass HomePageHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n time.sleep(1)\n human_query = Human.query()\n human_query = human_query.filter(Human.email == user.email())\n human_data = human_query.fetch()\n if human_data[0].tors == \"Tutor\":\n info = {\n 'university': human_data[0].school,\n 'major' : human_data[0].major,\n 'year' : human_data[0].year,\n 'name' : human_data[0].name,\n 'description':human_data[0].description,\n }\n template = jinja_environment.get_template('templates/tutorhome.html')\n self.response.write(template.render(info))\n greeting = ('Welcome, %s! (sign out)' %\n (user.nickname(), users.create_logout_url('/')))\n self.response.out.write('%s' % greeting)\n elif human_data[0].tors == \"Student\":\n info = {\n 'name': human_data[0].name,\n }\n template = jinja_environment.get_template('templates/studenthome.html')\n self.response.write(template.render(info))\n greeting = ('Welcome, %s! (sign out)' %\n (user.nickname(), users.create_logout_url('/')))\n self.response.out.write('%s' % greeting)\n\nclass ProfileHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n time.sleep(1)\n human_query = Human.query()\n human_query = human_query.filter(Human.email == user.email())\n human_data = human_query.fetch()\n info = {\n 'school': human_data[0].school,\n 'major' : human_data[0].major,\n 'year' : human_data[0].year,\n 'name' : human_data[0].name,\n 'description': human_data[0].description,\n }\n user_id=self.request.get('id')\n template = jinja_environment.get_template('templates/studentprofile.html')\n self.response.write(template.render(info))\n greeting = ('Welcome, %s! 
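The same three-line lookup (query `Human` by the signed-in user's email, fetch, take the first hit) appears in `MainHandler`, `HomePageHandler` and `ProfileHandler` above, and again in the handlers that follow. A sketch of a helper that could centralize it, assuming the same `Human` model and `users` API:

```python
def current_human(user):
    # Fetch the Human record belonging to the signed-in user, or None
    # if the user has not registered yet (the case MainHandler checks).
    results = Human.query(Human.email == user.email()).fetch(1)
    return results[0] if results else None
```

Centralizing the lookup would also leave a single place to address the datastore eventual-consistency delay that the `time.sleep(1)` calls above appear to work around.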
(sign out)' %\n (user.nickname(), users.create_logout_url('/')))\n self.response.out.write('%s' % greeting)\n\nclass ResultsHandler(webapp2.RequestHandler):\n def get(self):\n search = self.request.get('searchbox')\n user = users.get_current_user()\n human_query1 = Human.query()\n human_query1 = human_query1.filter(Human.email == user.email())\n human_data1 = human_query1.fetch()\n school_of_user = human_data1[0].school\n human_query = Human.query()\n human_query = Human.query(Human.subject == search)\n human_query = human_query.filter(Human.school == school_of_user)\n human_query = human_query.filter(Human.tors == \"Tutor\")\n human_data = human_query.fetch()\n names_of_results = \"\"\n keys_of_results = []\n y=0\n for x in human_data:\n keys_of_results.append({\"name\":Human.get_by_id(int(x.key.id())).name, \"link\":\"/profileviewer?id=\"+str(x.key.id())})\n y=y+1\n for x in human_data:\n names_of_results=names_of_results+\"
\"+x.name+\"
\"\n template_values = {\n 'results':keys_of_results,\n }\n template = jinja_environment.get_template('templates/results.html')\n self.response.write(template.render(template_values))\n\n\nclass ProfileViewerHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n user_id=self.request.get('id')\n tutor = {\n 'tutor': Human.get_by_id(int(user_id))\n }\n template = jinja_environment.get_template('templates/tutorhomeview.html')\n self.response.write(template.render(tutor))\n greeting = ('Welcome, %s! (sign out)' %\n (user.nickname(), users.create_logout_url('/')))\n self.response.out.write('%s' % greeting)\n\nclass SendMailHandler(webapp2.RequestHandler):\n def post(self):\n user = users.get_current_user()\n send_approved_mail(user.email(), self.request.get('id'), self.request.get('email'))\n self.response.content_type = 'text/plain'\n self.redirect('/homepage')\n\ndef send_approved_mail(sender_address ,emailr, content):\n mail.send_mail(sender=sender_address,\n to=emailr,\n subject=\"Tutor Request\",\n body=content)\n\nclass EditHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n human_query = Human.query()\n human_query = human_query.filter(Human.email == user.email())\n human_data = human_query.fetch()\n template = jinja_environment.get_template('templates/edit.html')\n info = {\n 'university': human_data[0].school,\n 'major' : human_data[0].major,\n 'year' : human_data[0].year,\n 'name' : human_data[0].name,\n 'description' : human_data[0].description\n }\n self.response.write(template.render(info))\n greeting = ('Welcome, %s! (sign out)' %\n (user.nickname(), users.create_logout_url('/')))\n self.response.out.write('%s' % greeting)\n def post(self):\n user = users.get_current_user()\n human_query = Human.query()\n human_query = human_query.filter(Human.email == user.email())\n human_data = human_query.fetch()\n human_data = human_data[0]\n names=self.request.get('name')\n universitys=self.request.get('school')\n majors=self.request.get('major')\n years=self.request.get('year')\n descriptions=self.request.get('description')\n if len(names)>0:\n human_data.name=names\n if len(universitys)>0:\n human_data.school=universitys\n if len(majors)>0:\n human_data.major=majors\n if len(years)>0:\n human_data.year=years\n if len(descriptions)>0:\n human_data.description=descriptions\n human_data.put()\n self.redirect('/profile')\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/homepage', HomePageHandler),\n ('/profile',ProfileHandler),\n ('/results', ResultsHandler),\n ('/profileviewer', ProfileViewerHandler),\n ('/send_mail', SendMailHandler),\n ('/edit', EditHandler),\n], debug=True)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "jinja2.Environment", "line_number": 10, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb.Model", "line_number": 14, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 14, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 15, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 15, 
"usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 16, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 16, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 17, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 17, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 18, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 18, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 19, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 19, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 20, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 20, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 21, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 21, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 22, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 22, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 24, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 26, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 26, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 35, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 35, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 42, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 42, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 48, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 48, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 59, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 61, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 77, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 77, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 86, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 86, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 89, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 91, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 91, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 92, "usage_type": "call"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 107, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 107, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 110, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 113, 
"usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 113, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 138, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 140, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 140, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 148, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 148, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 151, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 153, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 153, "usage_type": "name"}, {"api_name": "google.appengine.api.mail.send_mail", "line_number": 159, "usage_type": "call"}, {"api_name": "google.appengine.api.mail", "line_number": 159, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 164, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 166, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 166, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 180, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 180, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 183, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 183, "usage_type": "name"}, {"api_name": "webapp2.WSGIApplication", "line_number": 206, "usage_type": "call"}]} +{"seq_id": "651968242", "text": "from PIL import Image\nfrom ast import literal_eval\n \nimg = Image.new('RGB', (50, 50), color=(0,0,0))\nimg.save('canvas.bmp')\n\nvalores = []\n\nwith open('matriz.txt', 'r') as f:\n valores = f.readlines()\n\nvalores = [x.strip() for x in valores] \n\nim = Image.open('canvas.bmp')\npix = im.load()\n\nres = list(map(literal_eval, valores))\n\nfor x in range(0, im.size[0]):\n for y in range(0, im.size[1]):\n tupla = (res[y][x][0], res[y][x][1], res[y][x][2])\n pix[x,y] = tupla\n\nim.save('canvas.bmp')\n \n", "sub_path": "Programacion/[Prog 02] BMP/Solver/solve.py", "file_name": "solve.py", "file_ext": "py", "file_size_in_byte": 513, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "PIL.Image.new", "line_number": 4, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 4, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 17, "usage_type": "argument"}]} +{"seq_id": "113449122", "text": "# Multiply Layer Test\nimport sys\nimport os\nfrom pathlib import Path\ntry:\n sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))\n from layers import Multiply\nexcept ImportError:\n raise ImportError(\"Library Module Can Not Found\")\n\napple = 100\napplecount = 5\ndiscount = 0.9\n\n# layers\nmultiply_appleprice = Multiply()\nmultiply_discountprice = Multiply()\n\n# foward propagation\nappleprice = multiply_appleprice.forward(apple, applecount)\nprint(f'appleprice = {appleprice}')\n\ndiscountprice = multiply_discountprice.forward(appleprice, discount)\nprint(f'discountprice = 
{discountprice}')\n\n# backward propagation\nddiscountprice = 1\n\ndappleprice, ddiscount = multiply_discountprice.backward(ddiscountprice)\nprint(f'dappleprice = {dappleprice}, ddiscount={ddiscount}')\n\ndapple, dapplecount = multiply_appleprice.backward(dappleprice)\nprint(f'dapple = {dapple}, dapplecount={dapplecount}')\n\n\n", "sub_path": "04.deep-learning/02.neural-network/07.backpropagation/ex01.py", "file_name": "ex01.py", "file_ext": "py", "file_size_in_byte": 898, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 6, "usage_type": "call"}, {"api_name": "layers.Multiply", "line_number": 16, "usage_type": "call"}, {"api_name": "layers.Multiply", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "350459555", "text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\n\nimport time\n# import math\nimport os\n\n# def calc(x):\n# return str(math.log(abs(12*math.sin(int(x)))))\ntry:\n link = \"http://suninjuly.github.io/file_input.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n\n fname = browser.find_element_by_css_selector(\"input[name='firstname']\")\n fname.send_keys(\"Alexandr\")\n\n lname = browser.find_element_by_css_selector(\"input[name='lastname']\")\n lname.send_keys(\"Pushkin\")\n\n email = browser.find_element_by_css_selector(\"input[name='email']\")\n email.send_keys(\"alex@ya.ru\")\n\n current_dir = os.path.abspath(os.path.dirname(__file__)) # получаем путь к директории текущего исполняемого файла\n file_path = os.path.join(current_dir, 'file.txt') # добавляем к этому пути имя файла\n ffile = browser.find_element_by_css_selector(\"input#file\")\n ffile.send_keys(file_path)\n\n button = browser.find_element_by_css_selector(\".btn.btn-primary\")\n\n # Отправляем заполненную форму\n dis = button.get_attribute(\"disabled\")\n if dis != \"disabled\":\n button.click()\n # ожидание\n time.sleep(10)\n else:\n assert \"Error\"\nfinally:\n # закрываем браузер после всех манипуляций\n browser.quit()\n", "sub_path": "lesson22_step8.py", "file_name": "lesson22_step8.py", "file_ext": "py", "file_size_in_byte": 1420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "63325097", "text": "import os\nimport uuid\nimport msgpack\nimport msgpack_numpy as m\nimport numpy\nimport pika\nimport cv2\n\namqp_url='amqp://tijjoigp:0uzZbSC8N5fxxkHsgXKaB5CcE4eKjKWf@lark.rmq.cloudamqp.com/tijjoigp'\nurl = os.environ.get('CLOUDAMQP_URL',amqp_url)\nparams = 
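The multiply-layer test above exercises a standard computational-graph building block: forward caches its two inputs and returns their product, and backward multiplies the upstream gradient by the cached inputs, swapped (d(xy)/dx = y, d(xy)/dy = x). A minimal sketch of what `layers.Multiply` is assumed to look like:

```python
class Multiply:
    def forward(self, x, y):
        # Cache the inputs; each becomes the other's local gradient.
        self.x, self.y = x, y
        return x * y

    def backward(self, dout):
        # Chain rule: swap the cached inputs against the upstream grad.
        return dout * self.y, dout * self.x

m = Multiply()
assert m.forward(100, 5) == 500
assert m.backward(0.9) == (4.5, 90.0)  # (dout * y, dout * x)
```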
pika.URLParameters(url)\nparams.socket_timeout = 5\n#initiate the connexion\nconnection = pika.BlockingConnection(params)\n\nchannel = connection.channel()\nresult = channel.queue_declare(exclusive=True)\ncallback_queue = result.method.queue\n\ncorr_id = str(uuid.uuid4())\nmessageBody = numpy.random.random((20,30))\n#messageJson = {'type': 0, 'value': 'Test'}\nencoded_message = m.packb(messageBody, default = m.encode)\n##\n# Publish message in queue\nchannel.basic_publish(exchange='',\n routing_key='rpc_queue',\n properties=pika.BasicProperties(\n reply_to = callback_queue,\n correlation_id = corr_id\n ),\n body=encoded_message)\n\n\n#print(\" [x] Sent \"+encoded_message)\n\n\n##\n# Get response from server side and close connection if gets datas\nresponse=None\ndef on_response(ch, method, props, body):\n # Check if it's our message\n if corr_id != props.correlation_id:\n raise Exception()\n\n global response\n response=str(body)\n print(m.unpackb(response, object_hook = m.decode))\n\nprint('Starting to wait on the response queue')\nchannel.basic_consume(on_response, no_ack=True,\n queue=callback_queue)\nwhile response is None: # wait for an answer\n connection.process_data_events()\nconnection.close()\n", "sub_path": "assignments/Session4/rpc_client.py", "file_name": "rpc_client.py", "file_ext": "py", "file_size_in_byte": 1638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pika.URLParameters", "line_number": 11, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 14, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "msgpack_numpy.packb", "line_number": 23, "usage_type": "call"}, {"api_name": "msgpack_numpy.encode", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pika.BasicProperties", "line_number": 28, "usage_type": "call"}, {"api_name": "msgpack_numpy.unpackb", "line_number": 48, "usage_type": "call"}, {"api_name": "msgpack_numpy.decode", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "358779889", "text": "import pandas as pd\nimport numpy as np\nimport openpyxl as xl\nimport statistics as stats\nfrom datetime import date, datetime\nfrom toolbox import CBD\nimport math\nimport urllib\nimport os\n\n\nBASE_RATES = None\n\n\ndef create_file(url, filename):\n response = urllib.request.urlopen(url)\n html = response.read()\n\n with open(filename, 'wb') as f:\n f.write(html)\n\n\ndef update_file(url, filename):\n if(os.path.isfile(filename)):\n file_date = datetime.fromtimestamp(\n os.path.getctime(filename))\n if file_date.date() != date.today():\n create_file(url, filename)\n else:\n create_file(url, filename)\n\n\ndef calculate_libor_dist():\n\n # scrapes historic boe base rates\n br_url = 'https://www.bankofengland.co.uk/boeapps/iadb/fromshowcolumns' \\\n '.asp?Travel=NIxRPxSUx&FromSeries=1&ToSeries=50&DAT=RNG&FD=14&FM=Oct' \\\n '&FY=2014&TD=26&TM=Feb&TY=2019&VFD=Y&CSVF=TN&C=13T&Filter=N&csv' \\\n '.x=28&csv.y=22'\n update_file(br_url, 'boe_hist_rates.csv')\n br = pd.read_csv('boe_hist_rates.csv')\n\n wb = xl.load_workbook('LIBOR.xlsx')\n la = wb[\"actual\"]\n\n libor = []\n\n row = 1\n # loops through historic 
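The RPC client above ships a numpy array through RabbitMQ by packing it with msgpack_numpy. A round-trip sketch of that serialization; note the bytes-in, bytes-out contract: `unpackb` needs the raw bytes of the message body, not a `str()` of it:

```python
import msgpack
import msgpack_numpy as m
import numpy as np

original = np.random.random((20, 30))
# Pack: m.encode turns the ndarray into a msgpack-friendly dict.
packed = msgpack.packb(original, default=m.encode)
# Unpack: m.decode rebuilds the ndarray from the raw bytes.
restored = msgpack.unpackb(packed, object_hook=m.decode)
assert np.array_equal(original, restored)
```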
libor, subtracts base rate of period\n while la.cell(row=row, column=3).value is not None:\n ld = la.cell(row, 3).value.date()\n if(la.cell(row, 1).value == '3 Month'):\n libor_i = la.cell(row, 2).value\n if not (ld > date(2018, 3, 7) and ld < date(2018, 5, 29)):\n bank_rate = br.loc[br['DATE'] == ld.strftime(\n '%d %b %Y')].iloc[0]['IUDBEDR']\n adj_libor = libor_i - bank_rate\n libor.append(adj_libor)\n row += 1\n\n return {'mean': stats.mean(libor), 'std_dev': stats.stdev(libor)}\n\n\ndef project_base_rates(start_date):\n # scrapes projected base rates\n br_url = 'https://public.tableau.com/views/Conditioningassumptions/CSV' \\\n '.csv?:showVizHome=no'\n update_file(br_url, 'boe_proj_rates.csv')\n br = pd.read_csv('boe_proj_rates.csv')\n br = br[br['Measure'] == 'Bank rate forecast']\n br.drop('Measure', axis=1, inplace=True)\n # converts date format in scraped data to match model\n for i, r in enumerate(br['Quarter']):\n q = date(int(r[:4]), 1 + ((int(r[-1:]) - 1) * 3), 1)\n br.at[i, 'Quarter'] = q\n prev_rate = 0.0\n maturity_date = br[-1:]['Quarter'].iloc[0]\n structure = pd.DataFrame()\n payment_dates = pd.Series(pd.date_range(\n start_date, maturity_date, freq=CBD()))\n structure['dates'] = payment_dates\n structure['base rate'] = [0.0] * len(payment_dates)\n # sets the projected rate, and then sets dates out of scope as the last one\n for i, r in enumerate(structure['dates']):\n rate_set = False\n r = r.date() # date\n prev_date = None\n for j, p in enumerate(br['Quarter']):\n if prev_date is not None:\n if (prev_date < r) and (r <= p):\n structure.at[i, 'base rate'] = br['Value'].iloc[[j - 1]]\n rate_set = True\n prev_date = p\n last_date = prev_date\n structure = structure.set_index('dates')\n return structure[structure.index <= pd.to_datetime(last_date)]\n\n\ndef historical_libor():\n prev_date = None\n wb = xl.load_workbook('LIBOR.xlsx')\n # contains historic libor data\n la = wb[\"actual\"]\n # populates historic libor rates into structure\n # loops through all the data to find a matching date\n row = 1\n libor = {}\n while la.cell(row=row, column=3).value is not None:\n if(la.cell(row, 1).value == '3 Month'):\n libor[la.cell(row, 3).value.date()] = la.cell(row, 2).value\n row += 1\n # libor_df = pd.DataFrame.from_dict(libor)\n libor_df = pd.DataFrame(list(libor.items()), columns=['Date', 'LIBOR'])\n return libor_df\n\n\ndef libor_projection(libor_df, avg, std_dev):\n # scrapes projected boe base rates from boe website\n start_date = libor_df[-1:]['Date'].iloc[0]\n global BASE_RATES\n BASE_RATES = project_base_rates(start_date)\n # generates a normal distribution based on the number of future quarters\n libor_prem = np.random.normal(avg, std_dev, len(BASE_RATES)).round(4)\n libor = {}\n for i, row in enumerate(BASE_RATES.itertuples(index=True)):\n curr_date = row[0]\n base_rate = row[1]\n libor[curr_date] = base_rate + libor_prem[i]\n libor_rates = pd.DataFrame(list(libor.items()), columns=['dates', 'LIBOR'])\n BASE_RATES.reset_index(inplace=True)\n return libor_rates\n\n\ndef libor(num_simulations):\n libor_set = []\n\n avg = calculate_libor_dist()['mean']\n std_dev = calculate_libor_dist()['std_dev']\n hl = historical_libor()\n\n for i in range(num_simulations):\n libor_set.append(libor_projection(hl, avg, std_dev))\n\n return libor_set\n", "sub_path": "libor.py", "file_name": "libor.py", "file_ext": "py", "file_size_in_byte": 4841, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": 
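`libor_projection` above is a one-path Monte Carlo step: it adds an i.i.d. normal LIBOR-over-base spread (mean and standard deviation estimated from history by `calculate_libor_dist`) to each projected base rate. A standalone sketch that draws several paths at once; the inputs are hypothetical, and `default_rng` assumes a reasonably recent numpy:

```python
import numpy as np
import pandas as pd

def simulate_libor(base_rates, spread_mean, spread_std, n_paths=3, seed=0):
    # One row per simulated path: projected base rate plus a normal spread.
    rng = np.random.default_rng(seed)
    draws = rng.normal(spread_mean, spread_std, (n_paths, len(base_rates)))
    return pd.DataFrame(draws.round(4) + base_rates.values,
                        columns=base_rates.index)

base = pd.Series([0.75, 1.00, 1.00],  # hypothetical quarterly projections
                 index=pd.to_datetime(['2019-04-01', '2019-07-01', '2019-10-01']))
print(simulate_libor(base, spread_mean=0.12, spread_std=0.05))
```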
"urllib.request.urlopen", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "name"}, {"api_name": "os.path.getctime", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 27, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 54, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 61, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 79, "usage_type": "call"}, {"api_name": "toolbox.CBD", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 96, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "29921749", "text": "from work_with_url import URLWorker\nfrom work_with_links import LinksWorker\nfrom threading import Thread\nfrom queue import Queue\nimport requests\nimport bs4\nimport os.path\nimport unittest\n\n\nclass TestWorkWithURL(unittest.TestCase):\n\n def test_get_name(self):\n url = \"https://www.google.ru\"\n expected_name = \"wwwgoogleru\"\n actual_name = URLWorker.get_name(URLWorker(url))\n self.assertEqual(expected_name, actual_name)\n\n def test_get_soup(self):\n url = URLWorker(\"https://lenta.ru\")\n actual = url.get_soup()\n page = requests.get(\"https://lenta.ru\")\n data = page.text\n expected = bs4.BeautifulSoup(data)\n self.assertEqual(actual, expected)\n\n def test_save_page_error(self):\n url = URLWorker(\"https://lentabc.ru\")\n actual = url.save_page(\"tests\")\n self.assertEqual(actual, ConnectionError)\n\n def test_save_page(self):\n url = URLWorker(\"https://lenta.ru\")\n url.save_page(\"tests\")\n actual = os.path.isfile(r\"tests\\lentaru.html\")\n self.assertEqual(actual, True)\n\n def test_robots_true(self):\n url = URLWorker(\"https://lenta.ru\")\n actual = url.process_robot_txt()\n self.assertEqual(actual, True)\n\n def test_robot_false(self):\n url = URLWorker(\"https://github.com\")\n actual = url.process_robot_txt()\n self.assertEqual(actual, False)\n\n def test_save_division_for_list(self):\n link = Queue()\n link.put(\"http://vilenin.narod.ru/Mm/Books/5/book.htm\")\n thread = Thread()\n thread.start()\n URLWorker.save_division(thread, 
link, \"tests\")\n self.assertEqual(link.qsize(), 0)\n\n def test_save_division_download(self):\n link = Queue()\n link.put(\"http://vilenin.narod.ru/Mm/Books/5/book.htm\")\n thread = Thread()\n thread.start()\n URLWorker.save_division(thread, link, \"tests\")\n actual = os.path.isfile(r\"tests\\httpvileninnarodruMmBooks5bookhtm.html\")\n self.assertEqual(actual, True)\n\n\nclass TestWorkWithLinks(unittest.TestCase):\n\n def test_get_links_len(self):\n link = \"http://vilenin.narod.ru/Mm/Books/5/book.htm\"\n soup = URLWorker(link).get_soup()\n list_links = LinksWorker.get_links(soup)\n self.assertEqual(len(list_links), 2)\n\n def test_get_links_content(self):\n link = \"http://vilenin.narod.ru/Mm/Books/5/book.htm\"\n soup = URLWorker(link).get_soup()\n list_links = LinksWorker.get_links(soup)\n self.assertEqual(list_links[0], \"http://top100.rambler.ru/top100/\")\n self.assertEqual(list_links[1], \"http://www.ucoz.ru/\")\n", "sub_path": "test_crawler.py", "file_name": "test_crawler.py", "file_ext": "py", "file_size_in_byte": 2642, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "work_with_url.URLWorker.get_name", "line_number": 16, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker", "line_number": 16, "usage_type": "name"}, {"api_name": "work_with_url.URLWorker", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker", "line_number": 28, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 35, "usage_type": "name"}, {"api_name": "work_with_url.URLWorker", "line_number": 39, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker", "line_number": 44, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 49, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 51, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker.save_division", "line_number": 53, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker", "line_number": 53, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 57, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 59, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker.save_division", "line_number": 61, "usage_type": "call"}, {"api_name": "work_with_url.URLWorker", "line_number": 61, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 62, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 66, "usage_type": "attribute"}, {"api_name": "work_with_url.URLWorker", "line_number": 70, "usage_type": "call"}, {"api_name": "work_with_links.LinksWorker.get_links", "line_number": 71, "usage_type": "call"}, {"api_name": "work_with_links.LinksWorker", "line_number": 71, "usage_type": "name"}, {"api_name": "work_with_url.URLWorker", "line_number": 76, "usage_type": "call"}, {"api_name": "work_with_links.LinksWorker.get_links", "line_number": 77, 
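The two `save_division` tests above exercise a queue-draining worker: URLs go into a `Queue`, a thread empties it and saves each page, and the tests then assert that the queue is empty and the file exists. A sketch of that pattern under the assumption that `URLWorker.save_division` behaves roughly like this; the real download step is replaced by a print stub:

```python
from queue import Queue, Empty
from threading import Thread

def save_division(links, out_dir):
    # Drain the queue; each URL would be saved via URLWorker(url).save_page.
    while True:
        try:
            url = links.get_nowait()
        except Empty:
            break
        print(f'would save {url} into {out_dir}')  # download stub
        links.task_done()

links = Queue()
links.put('http://vilenin.narod.ru/Mm/Books/5/book.htm')
worker = Thread(target=save_division, args=(links, 'tests'))
worker.start()
worker.join()
assert links.qsize() == 0  # the queue was fully drained
```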
"usage_type": "call"}, {"api_name": "work_with_links.LinksWorker", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "638963842", "text": "#!/usr/bin/python3\n\"\"\"Start a Flask web application\"\"\"\n\n\nfrom models import storage\nfrom flask import Flask, render_template\nfrom models.state import State\n\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef close_db(self):\n \"\"\"Close session\"\"\"\n storage.close()\n\n\n@app.route('/states_list', strict_slashes=False)\ndef stateList():\n \"\"\"function to execute\"\"\"\n states = storage.all(State).values()\n return render_template('7-states_list.html', states=states)\n\n\n@app.route('/cities_by_states', strict_slashes=False)\ndef cityList():\n \"\"\"function to execute\"\"\"\n states = storage.all(State).values()\n return render_template('8-cities_by_states.html', states=states)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=\"5000\")\n", "sub_path": "web_flask/8-cities_by_states.py", "file_name": "8-cities_by_states.py", "file_ext": "py", "file_size_in_byte": 756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "models.storage.close", "line_number": 16, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 16, "usage_type": "name"}, {"api_name": "models.storage.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.state.State", "line_number": 22, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "models.storage.all", "line_number": 29, "usage_type": "call"}, {"api_name": "models.state.State", "line_number": 29, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "44191550", "text": "from django.conf.global_settings import LOGIN_URL\nfrom django.core.urlresolvers import reverse\nfrom testbase.unit import UnitTestCase\n\n\nclass TestExpireSession(UnitTestCase):\n def test_expiresClientSessionForNextRequest(self):\n loginRequiredUrl = reverse('requires_login')\n\n user = self.createUser()\n self.logInAs(user)\n self.client.get(loginRequiredUrl)\n\n self.expireSession()\n response = self.client.get(loginRequiredUrl)\n\n expectedRedirect = '{}?next={}'.format(LOGIN_URL, loginRequiredUrl)\n self.assertRedirects(response, expectedRedirect)\n\n", "sub_path": "testsite/testapp/tests/unit/testExpireSession.py", "file_name": "testExpireSession.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "testbase.unit.UnitTestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.global_settings.LOGIN_URL", "line_number": 17, "usage_type": "argument"}]} +{"seq_id": "395007400", "text": "\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.cluster import DBSCAN\n\ndf = pd.read_csv(r'C:\\py_file\\consumption_data.csv')\n#print(df)\ndf = df.ix[:,['R','F','M']]\n#print(df)\ndf = df[(df.F<45)&(df.M<30000)]\n#print(df.describe())\ndf2 = (df - 
df.mean())/df.std()\n#print(df2)\nmodel = KMeans(n_clusters=5,max_iter=500)\nmodel.fit(df2)\n\nr1 = pd.Series(model.labels_).value_counts()\nr2 = pd.DataFrame(model.cluster_centers_)\nr = pd.concat([r2,r1],axis=1)\nr.columns = list(df2.columns)+[u'类别数目']\nprint(r)\n\n\ny_pred = model.predict(df2)\nsd = plt.figure().add_subplot(111,projection='3d')\nsd.scatter(df.R,df.F,df.M,c=y_pred)\nsd.set_xlabel('R')\nsd.set_ylabel('F')\nsd.set_zlabel('M')\nplt.show()", "sub_path": "k_mean.py", "file_name": "k_mean.py", "file_ext": "py", "file_size_in_byte": 781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "327140103", "text": "import pandas as pd\nfrom pyecharts import Line\ndf = pd.read_csv('D:/data/beijing_AQI_2018.csv')\nattr = df['Date']\nv1 = df['PM']\nline = Line(\"2018年北京PM2.5全年走势图\", title_pos='center',title_top='18',\n width=800, height=400)\nline.add(\"PM2.5值:\", attr, v1, mark_line=['average'], is_fill=True,\n area_color=\"#000\", area_opacity=0.3, mark_point=[\"max\", \"min\"],\n mark_point_symbol=\"circle\", mark_point_symbolsize=25)\nline.render(\"2018年北京PM2.5全年走势图.html\")", "sub_path": "py10_可视化/第二课/2018年北京PM2.5全年走势图.py", "file_name": "2018年北京PM2.5全年走势图.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 3, "usage_type": "call"}, {"api_name": "pyecharts.Line", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "255780053", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 4 00:36:25 2021\n\n@author: pc\n\"\"\"\n\nimport numpy as np\nimport pandas\nfrom sklearn.neighbors import KNeighborsRegressor\n\ntoy_example = pandas.read_csv(\"Week 2 Toy Example.csv\", header = 0)\n\n# Specify the data\nX = toy_example[['x1', 'x2']]\nY = toy_example['y']\n\n# Build nearest neighbors\nkNNSpec = KNeighborsRegressor(n_neighbors=2, metric='euclidean')\nnbrs = kNNSpec.fit(X, Y)\ndistances, indices = nbrs.kneighbors(X)\n\n# Calculate prediction, errors, and sum of squared error\npred_y = nbrs.predict(X)\nerror_y = Y - pred_y\nsse_y = np.sum(error_y ** 2)\n", "sub_path": "Lecture Materials/Lec 02/Week 2 Toy Example Prediction.py", "file_name": "Week 2 Toy Example Prediction.py", "file_ext": "py", "file_size_in_byte": 595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsRegressor", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "513487669", "text": "# -*- coding: utf-8 -*-\nimport math\nfrom .params import *\nimport os\nimport datetime\nfrom 
art import text2art\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\ndef Enernst_Calc(T,PH2,PO2):\n '''\n This function calculates Enernst\n :param T: Cell Operation Temperature [K]\n :param PH2: Partial Pressure [atm]\n :param PO2: Partial Pressure [atm]\n :return: Enernst [V]\n '''\n try:\n result=1.229-(8.5*(10**-4))*(T-298.15)+(4.308*(10**-5))*T*(math.log(PH2)+0.5*math.log(PO2))\n return result\n except Exception:\n print(\"[Error] Enernst Calculation Failed\")\n\ndef CH2_Calc(PH2,T):\n '''\n This function calculates CH2\n :param PH2: Partial Pressure [atm]\n :param T: Cell Operation Temperature [K]\n :return: CH2 [mol/cm^3]\n '''\n try:\n result=PH2/(1.09*(10**6)*math.exp(77/T))\n return result\n except Exception:\n print(\"[Error] CH2 Calculation Failed\")\n\ndef CO2_Calc(PO2,T):\n '''\n This function calculates CO2\n :param PO2: Partial Pressure [atm]\n :param T: Cell Operation Temperature [K]\n :return: CO2 [mol/cm^3]\n '''\n try:\n result=PO2/(5.08*(10**6)*math.exp(-498/T))\n return result\n except Exception:\n print(\"[Error] CO2 Calculation Failed\")\n\ndef Rho_Calc(i,A,T,lambda_param):\n '''\n This function calculates Rho\n :param i: Cell load current [A]\n :param A: active area [cm^2]\n :param T: Cell Operation Temperature [K]\n :param lambda_param: is an adjustable parameter with a possible maximum value of 23\n :return: Rho --> Membrane Specific Resistivity [ohm.cm]\n '''\n try:\n result=(181.6*(1+0.03*(i/A)+0.062*((T/303)**2)*((i/A)**2.5)))/((lambda_param-0.634-3*(i/A))*math.exp(4.18*((T-303)/T)))\n return result\n except Exception:\n print(\"[Error] Rho Calculation Failed\")\n\ndef Xi2_Calc(A,PH2,T):\n '''\n This function calculates Xi2\n :param A: active area [cm^2]\n :param PH2: Partial Pressure [atm]\n :param T: Cell Operation Temperature [K]\n :return: Xi2\n '''\n try:\n CH2=CH2_Calc(PH2,T)\n result=0.00286+0.0002*math.log(A)+(4.3*(10**-5))*math.log(CH2)\n return result\n except Exception:\n print(\"[Error] Xi2 Calculation Failed\")\n\ndef Eta_Conc_Calc(i,A,B,JMax):\n '''\n This function calculates Eta Concentration\n :param i: Cell load current [A]\n :param A: active area [cm^2]\n :return: Eta Concentration\n '''\n try:\n if i!=0:\n J=(i/A)\n result=-B*math.log(1-(J/JMax))\n return result\n else:\n return 0\n except Exception:\n print(\"[Error] Eta Concentration Calculation Failed\")\n\ndef Eta_Ohmic_Calc(i,l,A,T,lambda_param,R_elec=None):\n '''\n This function calculates Eta Ohmic\n :param i: cell load current [A]\n :param l: Membrane Thickness [cm]\n :param A: active area [cm^2]\n :param T: Cell Operation Temperature [K]\n :param lambda_param: is an adjustable parameter with a possible maximum value of 23\n :return: Eta Ohmic\n '''\n try:\n if i!=0:\n Rho=Rho_Calc(i,A,T,lambda_param)\n R_prot=(Rho*l)/A\n R_total=R_prot\n if isfloat(R_elec)==True:\n R_total+=R_elec\n result=i*R_total\n return result\n else:\n return 0\n except Exception:\n print(\"[Error] Eta Ohmic Calculation Failed\")\n\ndef Eta_Act_Calc(T,PO2,PH2,i,A):\n '''\n This function calculates Eta Activation\n :param T: Cell Operation Temperature [K]\n :param PO2: Partial Pressure [atm]\n :param i: cell load current [A]\n :return: Eta Activation\n '''\n try:\n if i!=0:\n CO2=CO2_Calc(PO2,T)\n xi2=Xi2_Calc(A,PH2,T)\n result=-(xi1+xi2*T+xi3*T*math.log(CO2)+xi4*T*math.log(i))\n return result\n else:\n return 0\n except Exception:\n print(\"[Error] Eta Activation Calculation Failed\")\n\ndef Efficiency_Calc(Vcell):\n '''\n This function calculates PEM Cell Efficiency\n :param 
Vcell: Cell Voltage [V]\n :return: Efficiency\n '''\n try:\n result=(uF*Vcell)/HHV\n return result\n except Exception:\n print(\"[Error] PEM Efficiency Calculation Failed\")\n\ndef VStack_Calc(N,Enernst,Loss):\n '''\n This function calculates VStack\n :param N: number of single cells\n :param Enernst: Enernst Voltage [V]\n :param Loss: Loss [V]\n :return: VStack [V]\n '''\n try:\n result=N*(Enernst-Loss)\n return result\n except Exception:\n print(\"[Error] VStack Calculation Error\")\n\ndef Get_Input():\n '''\n This function gets the inputs from the user\n :return: Input Dictionary\n '''\n try:\n Input_Keys=list(InputParams.keys())\n Input_Keys.sort()\n Input_Values=[]\n for item in Input_Keys:\n Input_Flag=False\n while(Input_Flag==False):\n Input_Item=input(\"Please Enter \"+item+\"(\"+InputParams[item]+\") : \")\n if isfloat(Input_Item)==True:\n Input_Flag=True\n else:\n print(\"[Error] Bad Input, Try Again\")\n Input_Values.append(Input_Item)\n Input_Values=list(map(float,Input_Values))\n Output=dict(zip(Input_Keys,Input_Values))\n if Output[\"lambda\"]>23:\n Output[\"lambda\"]=23\n print(\"[Warning] Opem Automatically Set Lambda To Maximum Value (23) \")\n return Output\n except Exception:\n print(\"Bad Input\")\n return False\n\ndef Output_Save(OutputDict,i,file):\n '''\n This function writes the analysis result to the Simulation-Result.opem file\n :param OutputDict: Analysis Result Dictionary\n :return: None\n '''\n\n Output_Keys=list(OutputDict.keys())\n Output_Keys.sort()\n file.write(\"I :\"+str(i)+\" A \\n\\n\")\n print(\"I : \"+str(i))\n for key in Output_Keys:\n file.write(key+\" : \"+str(OutputDict[key][0])+\" \"+OutputDict[key][1]+\"\\n\")\n print(key+\" : \"+str(OutputDict[key][0])+\" \"+OutputDict[key][1])\n file.write(\"###########\\n\")\n print(\"###########\")\ndef Output_Init(InputDict):\n '''\n This function initializes the output file\n :param InputDict: Input Test Vector\n :type InputDict:dict\n :return: file object\n '''\n Art = text2art(\"Opem\")\n file = open(\"Simulation-Result.opem\", \"w\")\n file.write(Art)\n file.write(\"Simulation Date : \" + str(datetime.datetime.now()) + \"\\n\")\n file.write(\"**********\\n\")\n file.write(\"Amphlett Static Model\\n\\n\")\n file.write(\"**********\\n\")\n file.write(\"Simulation Inputs : \\n\\n\")\n Input_Keys = list(InputDict.keys())\n Input_Keys.sort()\n for key in Input_Keys:\n file.write(key + \" : \" + str(InputDict[key]) + \"\\n\")\n file.write(\"**********\\n\")\n return file\n\ndef Static_Analysis(InputMethod=Get_Input,TestMode=False):\n '''\n This function runs the static analysis by calling the other functions\n :return: None\n '''\n try:\n if TestMode==False:\n Input_Dict=InputMethod()\n else:\n Input_Dict=InputMethod\n OutputFile=Output_Init(Input_Dict)\n print(\"Analyzing . . 
.\")\n IEndMax=Input_Dict[\"JMax\"]*Input_Dict[\"A\"]\n IEnd=min(IEndMax,Input_Dict[\"i-stop\"])\n IStep=Input_Dict[\"i-step\"]\n Enernst=Enernst_Calc(Input_Dict[\"T\"],Input_Dict[\"PH2\"],Input_Dict[\"PO2\"])\n i=Input_Dict[\"i-start\"]\n while(i<IEnd):\n # NOTE: loop body reconstructed from the helper signatures above (it was lost to tag-stripping between \"<\" and \">\"); the Input_Dict keys l, B and N are assumed to come from InputParams\n Eta_Act=Eta_Act_Calc(Input_Dict[\"T\"],Input_Dict[\"PO2\"],Input_Dict[\"PH2\"],i,Input_Dict[\"A\"])\n Eta_Ohmic=Eta_Ohmic_Calc(i,Input_Dict[\"l\"],Input_Dict[\"A\"],Input_Dict[\"T\"],Input_Dict[\"lambda\"])\n Eta_Conc=Eta_Conc_Calc(i,Input_Dict[\"A\"],Input_Dict[\"B\"],Input_Dict[\"JMax\"])\n Loss=Eta_Act+Eta_Ohmic+Eta_Conc\n Vcell=Enernst-Loss\n Efficiency=Efficiency_Calc(Vcell)\n Power=Vcell*i\n VStack=VStack_Calc(Input_Dict[\"N\"],Enernst,Loss)\n Output_Save({\"Enernst\":[Enernst,\"V\"],\"Eta Activation\":[Eta_Act,\"V\"],\"Eta Ohmic\":[Eta_Ohmic,\"V\"],\"Eta Concentration\":[Eta_Conc,\"V\"],\"Loss\":[Loss,\"V\"],\"Vcell\":[Vcell,\"V\"],\"PEM Efficiency\":[Efficiency,\"\"],\"Power\":[Power,\"W\"],\"VStack\":[VStack,\"V\"]},i,OutputFile)\n i=i+IStep\n OutputFile.close()\n print(\"Done! Results In -->\"+os.getcwd())\n except Exception:\n if OutputFile.closed==False:\n OutputFile.close()\n print(\"[Error] Simulation Failed!(Check Your Inputs)\")\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "opem/opem.py", "file_name": "opem.py", "file_ext": "py", "file_size_in_byte": 8860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "math.log", "line_number": 24, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 37, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 50, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 65, "usage_type": "call"}, {"api_name": "math.log", "line_number": 80, "usage_type": "call"}, {"api_name": "math.log", "line_number": 95, "usage_type": "call"}, {"api_name": "math.log", "line_number": 138, "usage_type": "call"}, {"api_name": "art.text2art", "line_number": 222, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 225, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 275, "usage_type": "call"}]} +{"seq_id": "645218477", "text": "import matplotlib.pyplot as plt\nimport math\ndata = open('pions.f14', 'r')\nk = 0\n\nheading_id = 'UQMD'\npions = []\npions_plus = []\npions_minus = []\npions_0 = []\nstrings = []\nresonances = []\nnumber_of_events = 100000\nfor line in data:\n line = line.split(' ') \n temp_l = []\n k += 1\n for j in line:#delete '0' and '\\n' elements from sublists of data_l\n if len(j) != 0 and '\\n' not in j:\n temp_l.append(j)\n elif '\\n' in j:\n temp_l.append(j[0:len(j)-1])\n line = temp_l\n if k == 20:\n if line[0] == heading_id:\n k = 0\n continue\n if line[9] == '101' and (float(line[5])**2 + float(line[6])**2)**(1/2)/float(line[7])\\\n <= 0.105104235266 and float(line[7]) < 0:\n pions.append(float(line[4]))\n if line[10] == '2':\n pions_plus.append(float(line[4]))\n elif line[10] == '-2':\n pions_minus.append(float(line[4]))\n elif line[10] == '0':\n pions_0.append(float(line[4]))\n if line[14] == '20':\n resonances.append(float(line[4]))\n elif line[14] in ['15', '23', '24', '27', '28']:\n strings.append(float(line[4]))\n k = 19\n\n\nprint(pions)\n\ndelta_e = 0.01\n\n\ndef plot(array, label = 'None', m = 0.140):\n \"draws a plot\"\n number_of_elements = int(max(array)//delta_e + 1)\n energy_interval = []\n number_of_particles = []\n for i in range(number_of_elements):\n number_of_particles.append(0)\n energy_interval.append(delta_e*(i+1))\n for energy in array:\n n = int(energy//delta_e)\n number_of_particles[n] += 1\n x = []\n y = []\n for i in range(len(number_of_particles)):\n if energy_interval[i] > m:\n if number_of_particles[i] != 0:\n x.append(energy_interval[i])\n y.append(number_of_particles[i]/(math.sqrt(energy_interval[i]**2 - m**2))/delta_e/number_of_events)\n plt.step(x, y, label = label)\n\n\nplt.figure(1)\nplot(pions, 'pi')\nplot(pions_plus)\nplot(pions_minus)\nplot(pions_0)\nplt.yscale('log')\nplt.figure(2)\nplot(pions, 'pi')\nplot(resonances, 'resonances')\nplot(strings, 'strings')\nplt.yscale('log')\nplt.legend()\nplt.show()", "sub_path": "f14_looking_for_pions.py", "file_name": "f14_looking_for_pions.py", "file_ext": "py", "file_size_in_byte": 2237, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "math.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.step", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "564062858", "text": "from datetime import datetime\nimport logging\nimport os\nimport sys\nimport time\n\n\ndef init_logger(name='main', logdir='./logs',\n log_to_stdout=True, log_to_file=True):\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\n '%(asctime)s\\t[%(levelname)s]\\t(%(module)s:'\n '%(lineno)03d)\\t%(message)s')\n\n if log_to_stdout:\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if log_to_file:\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n assert os.path.isdir(logdir)\n\n fh = logging.FileHandler(os.path.join(logdir, '%s.log' % name))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n\nclass CallTimer:\n def __enter__(self):\n self._start = time.clock()\n self._end = None\n return self\n\n def __exit__(self, *args):\n self._end = time.clock()\n\n @property\n def interval(self):\n return (time.clock() - self._start\n if self._end is None\n else self._end - self._start)\n", "sub_path": "shield/utils/logging_.py", "file_name": "logging_.py", "file_ext": "py", "file_size_in_byte": 1226, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 30, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 39, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 44, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "614869428", "text": "# -*- coding: utf-8 -*-\n\n'''\n\n'''\n\nfrom __future__ import (\n division,\n print_function,\n)\n\nimport skimage.data\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport selectivesearch\n\nimport matplotlib.image as mpimg # mpimg 用于读取图片\n\ndef mySelectivesearch(path):\n # loading astronaut image\n # img = skimage.data.astronaut()\n # print(type(img))\n img = mpimg.imread(path) # 读取和代码处于同一目录下的 img.png\n # 此时 img 就已经是一个 np.array 了,可以对它进行任意处理\n\n # perform selective search\n ''' img_lbl, regions = selectivesearch.selective_search(\n img, scale=500, sigma=0.9, min_size=10)\n - img: 'numpy.ndarray'\n - scale: \n - sigma:\n - min_size:\n '''\n img_lbl, regions = selectivesearch.selective_search(\n img, scale=500, sigma=0.9, min_size=3)\n \n\n candidates = set()\n for r in regions:\n # excluding same rectangle (with different segments)\n if r['rect'] in candidates:\n continue\n # # excluding regions smaller than 2000 pixels\n # if r['size'] < 2000:\n # continue\n\n # if r['size'] > 200:\n # print(r['size'] )\n # continue\n\n # if r['size'] < 10 or r['size']>3000:\n # continue\n\n if r['size'] < 3 :\n continue\n \n # print( r['size'], \":\", r['rect'] ) \n\n # distorted rects\n x, y, w, h = r['rect']\n\n if h == 0 :\n continue\n if w == 0:\n continue\n if h < 5 or w < 5:\n continue\n if w / h > 3 or h / w > 3:\n continue\n candidates.add(r['rect'])\n\n return img, candidates\n\ndef showImg(img, candidates):\n ## draw rectangles on the original image\n fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))\n ax1.imshow(img)\n for x, y, w, h in candidates:\n print(x, y, w, h)\n rect = mpatches.Rectangle(\n (x, y), w, h, fill=False, edgecolor='red', linewidth=1)\n ax1.add_patch(rect)\n \n plt.show()\n '''\n for x, y, w, h in candidates:\n tmp = img[x:x+w,y:y+h]\n plt.imshow(tmp)\n plt.show()\n '''\n\nif __name__ == \"__main__\":\n # path = '../Data/train_LabelData/LabelData/500_0LmA_rVnydZ4z_CDcA8yqW.jpg'\n path = '../Data/train_data/LabelData/500_0LmA_rVnydZ4z_CDcA8yqW.jpg'\n \n img, candidates = mySelectivesearch(path)\n showImg(img, candidates)\n\n", "sub_path": "src/mySelectivesearch.py", "file_name": "mySelectivesearch.py", "file_ext": "py", "file_size_in_byte": 2522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "matplotlib.image.imread", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 23, "usage_type": "name"}, {"api_name": "selectivesearch.selective_search", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "96319908", "text": "from django import forms\nfrom 
.models import Post, Comment\n\n\nclass PostForm(forms.ModelForm):\n\n class Meta:\n model = Post\n fields = ('title', 'description', 'text', 'image')\n\n def clean_title(self):\n title = self.cleaned_data[\"title\"]\n if len(title) > 25:\n raise forms.ValidationError('not valid length')\n return title\n", "sub_path": "practice_1/project/apps/core/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 371, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "models.Post", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "277789754", "text": "# -*- coding: UTF-8 -*-\n\"\"\"Defines the CLI for destroying Kemp ECS Connection Management load balancer\"\"\"\nimport click\n\nfrom vlab_cli.lib.widgets import Spinner\nfrom vlab_cli.lib.api import consume_task\nfrom vlab_cli.lib.click_extras import MandatoryOption\n\n\n@click.command()\n@click.option('-n', '--name', cls=MandatoryOption,\n help='The name of the Kemp ECS Connection Management load balancer in your lab')\n@click.pass_context\ndef kemp(ctx, name):\n \"\"\"Delete a Kemp ECS Connection Management load balancer\"\"\"\n body = {'name': name}\n consume_task(ctx.obj.vlab_api,\n endpoint='/api/2/inf/kemp',\n message='Destroying Kemp ECS Connection Management load balancer named {}'.format(name),\n body=body,\n method='DELETE')\n with Spinner('Deleting port mapping rules'):\n all_ports = ctx.obj.vlab_api.get('/api/1/ipam/portmap', params={'name': name}).json()['content']['ports']\n for port in all_ports.keys():\n ctx.obj.vlab_api.delete('/api/1/ipam/portmap', json={'conn_port': int(port)})\n click.echo('OK!')\n", "sub_path": "vlab_cli/subcommands/delete/kemp.py", "file_name": "kemp.py", "file_ext": "py", "file_size_in_byte": 1114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "vlab_cli.lib.api.consume_task", "line_number": 17, "usage_type": "call"}, {"api_name": "vlab_cli.lib.widgets.Spinner", "line_number": 22, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 26, "usage_type": "call"}, {"api_name": "click.command", "line_number": 10, "usage_type": "call"}, {"api_name": "click.option", "line_number": 11, "usage_type": "call"}, {"api_name": "vlab_cli.lib.click_extras.MandatoryOption", "line_number": 11, "usage_type": "name"}, {"api_name": "click.pass_context", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "317828327", "text": "import django\nfrom django.utils.functional import cached_property\nfrom django.test.testcases import TestCase\nfrom django.test.client import RequestFactory\nfrom hvad.test_utils.context_managers import UserLoginContext\nimport warnings\n\n\ndef minimumDjangoVersion(*args):\n return (lambda x: x) if django.VERSION >= args else (lambda x: 'disabled')\ndef maximumDjangoVersion(*args):\n return (lambda x: x) if django.VERSION < args else (lambda x: 'disabled')\n\n\nclass _AssertThrowsWarningContext(object):\n def __init__(self, test_case, klass, number):\n self.test_case = test_case\n self.klass = klass\n self.number = number\n self.ctx = warnings.catch_warnings(record=True)\n\n def __enter__(self):\n self.warnings = 
self.ctx.__enter__()\n warnings.resetwarnings()\n warnings.simplefilter('always')\n\n def __exit__(self, type, value, traceback):\n self.test_case.assertEqual(\n len(self.warnings), self.number, \"%d warnings thrown, %d expected\" % (\n len(self.warnings), self.number\n )\n )\n for warning in self.warnings:\n self.test_case.assertTrue(issubclass(warning.category, self.klass),\n '%s warning thrown, %s expected' %\n (warning.category.__name__, self.klass.__name__))\n self.ctx.__exit__(type, value, traceback)\n\n\nclass HvadTestCase(TestCase):\n def setUp(self):\n\n if hasattr(self, 'create_fixtures'):\n self.create_fixtures()\n\n @cached_property\n def request_factory(self):\n return RequestFactory()\n\n def login_user_context(self, username):\n return UserLoginContext(self, username=username, password=username)\n\n def assertThrowsWarning(self, klass, number=1):\n return _AssertThrowsWarningContext(self, klass, number)\n\n def assertSavedObject(self, obj, language, **kwargs):\n 'Checks the object was saved in given language with given attributes'\n self.assertEqual(language, kwargs.pop('language_code', language),\n 'Test error: mismatching language and language_code.')\n for key, value in kwargs.items():\n self.assertEqual(getattr(obj, key), value)\n self.assertEqual(obj.language_code, language)\n self.assertCountEqual(\n obj.__class__.objects.language(language).filter(**kwargs).values_list('pk', flat=True),\n [obj.pk]\n )\n\n# method was renamed from assertItemsEqual in Python 3\nif not hasattr(HvadTestCase, 'assertCountEqual'):\n HvadTestCase.assertCountEqual = HvadTestCase.assertItemsEqual\n", "sub_path": "hvad/test_utils/testcase.py", "file_name": "testcase.py", "file_ext": "py", "file_size_in_byte": 2646, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.VERSION", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.VERSION", "line_number": 12, "usage_type": "attribute"}, {"api_name": "warnings.catch_warnings", "line_number": 20, "usage_type": "call"}, {"api_name": "warnings.resetwarnings", "line_number": 24, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 25, "usage_type": "call"}, {"api_name": "django.test.testcases.TestCase", "line_number": 40, "usage_type": "name"}, {"api_name": "django.test.client.RequestFactory", "line_number": 48, "usage_type": "call"}, {"api_name": "django.utils.functional.cached_property", "line_number": 46, "usage_type": "name"}, {"api_name": "hvad.test_utils.context_managers.UserLoginContext", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "63276141", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom openerp.osv import osv,fields\nfrom openerp import models, fields, api, _\n#CDCM\n#07-04-2021\n# Library added to be able to use ValidationError\nfrom openerp.exceptions import ValidationError\n#CDCM\n_logger = logging.getLogger(__name__)\n\nclass respartnerHistorico(models.Model):\n _name=\"res.partner.historico\" \n\n alumno_id = fields.Many2one('res.partner',string=\"Alumno\")\n usuario_id=fields.Many2one('res.users',string=\"Usuario\", default=lambda self:self.env.user ,readonly=True)\n enero = fields.Boolean(string=\"Enero\")\n febrero = fields.Boolean(string=\"Febrero\")\n marzo = fields.Boolean(string=\"Marzo\")\n abril = fields.Boolean(string=\"Abril\")\n mayo = fields.Boolean(string=\"Mayo\")\n junio = fields.Boolean(string=\"Junio\")\n julio = fields.Boolean(string=\"Julio\")\n agosto = 
fields.Boolean(string=\"Agosto\")\n septiembre = fields.Boolean(string=\"Septiembre\")\n octubre = fields.Boolean(string=\"Octubre\")\n noviembre = fields.Boolean(string=\"Noviembre\")\n diciembre = fields.Boolean(string=\"Diciembre\")\n accion = fields.Selection( (('0','Ninguno'),\n ('1','Facturar'),\n ('2','Cobrar')) , 'Acción', required=False, default='0')\n\nclass respartner(models.Model):\n _inherit=\"res.partner\" \n _order= \"name\"\n _rec_name = \"name\"\n\n property_account_receivable_related = fields.Many2one(related='property_account_receivable',string=\"Cuenta a cobrar\",store=True)\n banco_id=fields.Many2one('res.bank','Banco')\n cuenta_banco=fields.Char('Cuenta Bancaria',size=20)\n codigo_alumno=fields.Char('Código',size=20)\n tipo= fields.Selection( [('P','Representante'),\n ('C','Clientes'),\n ('PA','Padre'),\n ('M','Madre'),\n ('H','Representado')] , 'Tipo', required=False)\n\n jornada_id=fields.Many2one('jornada','Jornada',copy=False, index=True)\n seccion_id=fields.Many2one('seccion','Sección',copy=False, index=True)\n curso_id=fields.Many2one('curso','Curso',copy=False, index=True)\n paralelo_id=fields.Many2one('paralelo','Paralelo',copy=False, index=True)\n cedula=fields.Char(string='Ced/RUC')\n codigo_auto = fields.Boolean('Cod. Automatico',default=True)\n descuentos_line=fields.One2many('descuentos.tomar', 'partner_ids', string='Descuentos detalle',\n copy=True)\n colaborador = fields.Many2one('tipo.colaborador',string=\"Colaborador\")\n enero = fields.Boolean(string=\"Enero\",default=True)\n febrero = fields.Boolean(string=\"Febrero\",default=True)\n marzo = fields.Boolean(string=\"Marzo\",default=True)\n abril = fields.Boolean(string=\"Abril\",default=True)\n mayo = fields.Boolean(string=\"Mayo\",default=True)\n junio = fields.Boolean(string=\"Junio\",default=True)\n julio = fields.Boolean(string=\"Julio\",default=True)\n agosto = fields.Boolean(string=\"Agosto\",default=True)\n septiembre = fields.Boolean(string=\"Septiembre\",default=True)\n octubre = fields.Boolean(string=\"Octubre\",default=True)\n noviembre = fields.Boolean(string=\"Noviembre\",default=True)\n diciembre = fields.Boolean(string=\"Diciembre\",default=True)\n cobrar = fields.Boolean(string=\"Cobrar\",default=True)\n facturar = fields.Boolean(string=\"Facturar\",default=True)\n\n #-------HISTORICO--------\n jornada_anterior_id=fields.Many2one('jornada','Jornada A.',copy=False, index=True)\n seccion_anterior_id=fields.Many2one('seccion','Sección A.',copy=False, index=True)\n curso_anterior_id=fields.Many2one('curso','Curso A.',copy=False, index=True)\n paralelo_anterior_id=fields.Many2one('paralelo','Paralelo A.',copy=False, index=True)\n historico_id = fields.One2many('res.partner.historico','alumno_id',string=\"Historico\")\n #RERV invoice field to track the invoices issued for the student\n factura_emitida_ids = fields.Many2many(\"academic.month\", ondelete=\"restrict\")\n #CDCM\n #07-04-2021\n # make the street field required in the supplier view\n street = fields.Char(required=True)\n vat = fields.Char(required=True)\n longitud_vat =fields.Integer(compute='_compute_longitud_tipoid')\n\n @api.one\n @api.depends('tipoid')\n def _compute_longitud_tipoid(self):\n self.longitud_vat = self.tipoid.longitud\n\n @api.constrains('vat')\n def verificar_longitud_vat(self):\n if self.longitud_vat != len(self.vat):\n raise ValidationError ('Por favor verifique la longitud del campo NIF, la longuitud debe ser igual a %s'%self.longitud_vat)\n\n #CDCM\n\n @api.onchange('jornada_id')\n def 
onchange_jornada(self):\n for l in self:\n if l.tipo=='H':\n if l.jornada_id:\n l.seccion_id=False\n l.curso_id= False\n l.paralelo_id = False\n\n @api.onchange('seccion_id')\n def onchange_seccion(self):\n for l in self:\n if l.tipo=='H':\n if l.seccion_id:\n l.curso_id= False\n l.paralelo_id = False\n\n @api.onchange('curso_id')\n def onchange_curso(self):\n for l in self:\n if l.tipo=='H':\n if l.curso_id:\n l.paralelo_id = False\n\n def name_get(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n if isinstance(ids, (int, long)):\n ids = [ids]\n res = []\n for record in self.browse(cr, uid, ids, context=context):\n name = record.name\n # if record.parent_id and not record.is_company:\n # name = \"%s, %s\" % (record.parent_name, name)\n # if context.get('show_address_only'):\n # name = self._display_address(cr, uid, record, without_company=True, context=context)\n # if context.get('show_address'):\n # name = name + \"\\n\" + self._display_address(cr, uid, record, without_company=True, context=context)\n # name = name.replace('\\n\\n','\\n')\n # name = name.replace('\\n\\n','\\n')\n # if context.get('show_email') and record.email:\n # name = \"%s <%s>\" % (name, record.email) \n res.append((record.id, name))\n return res\n\n @api.model\n def create(self, vals):\n valor=0\n try:\n if vals['tipo']=='H':\n while True:\n seq = self.env['ir.sequence'].next_by_code('res.partner') or ''\n obj_data = self.env['res.partner'].search([('codigo_alumno','=',seq)])\n obj_secuence = self.env['ir.sequence'].search([('code','=','res.partner')])\n if len(obj_data)!=0:\n self.env.cr.execute(\"update ir_sequence set number_next='{0}' where code='res.partner'\".format(obj_secuence.number_next_actual))\n else:\n if vals['codigo_auto']==True:\n vals['codigo_alumno'] = seq\n break\n a = super(respartner, self).create(vals)\n except Exception as e:\n a = super(respartner, self).create(vals)\n \n return a\n\n #CDCM \n #07-04-2021\n # api.one is changed to api.multi to verify that more than one record can be selected\n @api.one\n #CDCM \n @api.depends('name')\n def _carga_pais(self):\n for l in self:\n # if l.tipo!=False:\n obj_pais=self.env['configuracion'].search([('id_pais','!=',None)])\n l.country_id=obj_pais.id_pais.id\n l.state_id=obj_pais.id_provincia.id\n l.zip=l.country_id.codigo\n l.city=obj_pais.ciudad\n\n carga_pais = fields.Boolean(string=\"Cargo\",compute='_carga_pais',readonly='0')\n\n #REUSED FIELDS \n\n vat= fields.Char('TIN', help=\"Tax Identification Number. Check the box if this contact is subjected to taxes. 
Used by some of the legal statements.\",size=13)\n \n\n _defaults = { \n 'tipo': 'C',\n }\n \n _sql_constraints = [\n ('cuenta_banco_uniq', 'unique(cuenta_banco)',\n 'La cuenta de banco ya fue registrada!'),\n ]\n \n _sql_constraints = [\n ('codigo_alumno_uniq', 'unique(codigo_alumno)',\n 'La codigo del alumno ya fue registrado!'),\n ]\n\n @api.constrains('vat','tipoid')\n def constrains_vat(self):\n for l in self:\n if l.vat!=False and l.tipo=='P':\n if len(l.vat) == l.tipoid.longitud:\n _logger.info(\"Cedula - Vat - correcta\")\n return True\n else:\n raise osv.except_osv(('Alerta'),(\"Numero de Identificación Incorrecto!\"))\n else:\n _logger.info(\"Cedula - Vat - vacia\")\n\n @api.constrains('cedula')\n def constrains_cedula(self):\n for l in self:\n if l.cedula!=False and l.tipo=='H':\n if len(l.cedula)== 10:\n _logger.info(\"Cedula correcta\")\n else:\n raise osv.except_osv(('Alerta'),(\"Numero de Identificacion Incorrecto!\"))\n else:\n _logger.info(\"Cedula vacia\")\n return False\n\n \n\n def button_check_vat(self):\n return True\n\n def _construct_constraint_msg(self):\n def default_vat_check(cn, vn):\n # by default, a VAT number is valid if:\n # it starts with 2 letters \n # has more than 3 characters\n # return cn[0] in string.ascii_lowercase and cn[1] in string.ascii_lowercase\n # vat_country, vat_number = self._split_vat(self.browse(cr, uid, ids)[0].vat)\n # vat_no = \"'CC##' (CC=Country Code, ##=VAT Number)\"\n # error_partner = self.browse(cr, uid, ids, context=context)\n # if default_vat_check(vat_country, vat_number):\n # vat_no = _ref_vat[vat_country] if vat_country in _ref_vat else vat_no\n # if self.pool['res.users'].browse(cr, uid, uid).company_id.vat_check_vies:\n # return '\\n' + _('The VAT number [%s] for partner [%s] either failed the VIES VAT validation check or did not respect the expected format %s.') % (error_partner[0].vat, error_partner[0].name, vat_no)\n # return '\\n' + _('The VAT number [%s] for partner [%s] does not seem to be valid. 
\\nNote: the expected format is %s') % (error_partner[0].vat, error_partner[0].name, vat_no)\n return True\n return True\n\n def check_vat(self, cr, uid, ids, context=None):\n\n return True\n\n\n _constraints = [(check_vat, 'CHECK(1=1)', [\"vat\"])]\n\n", "sub_path": "hanibal/ans_escuela/res_partner.py", "file_name": "res_partner.py", "file_ext": "py", "file_size_in_byte": 10629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "openerp.models.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 13, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 16, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 16, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 17, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 17, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 18, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 19, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 20, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 20, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 21, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 21, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 22, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 23, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 24, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 25, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 26, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 27, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 27, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 28, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 28, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 29, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 30, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 30, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 34, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 39, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 39, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 40, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 40, "usage_type": "name"}, {"api_name": "openerp.fields.Char", 
"line_number": 41, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 41, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 42, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 42, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 43, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 43, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 49, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 49, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 50, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 50, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 51, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 51, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 52, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 52, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 53, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 53, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 54, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 54, "usage_type": "name"}, {"api_name": "openerp.fields.One2many", "line_number": 55, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 55, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 57, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 57, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 58, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 58, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 59, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 59, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 60, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 60, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 61, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 61, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 62, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 62, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 63, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 63, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 64, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 64, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 65, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 65, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 66, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 66, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 67, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 67, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 68, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 68, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 69, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 69, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", 
"line_number": 70, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 70, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 71, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 71, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 74, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 74, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 75, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 75, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 76, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 76, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 77, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 77, "usage_type": "name"}, {"api_name": "openerp.fields.One2many", "line_number": 78, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 78, "usage_type": "name"}, {"api_name": "openerp.fields.Many2many", "line_number": 80, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 80, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 84, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 84, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 85, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 85, "usage_type": "name"}, {"api_name": "openerp.fields.Integer", "line_number": 86, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 86, "usage_type": "name"}, {"api_name": "openerp.api.one", "line_number": 88, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 88, "usage_type": "name"}, {"api_name": "openerp.api.depends", "line_number": 89, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 89, "usage_type": "name"}, {"api_name": "openerp.exceptions.ValidationError", "line_number": 96, "usage_type": "call"}, {"api_name": "openerp.api.constrains", "line_number": 93, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 93, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 100, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 100, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 109, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 109, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 117, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 117, "usage_type": "name"}, {"api_name": "openerp.api.model", "line_number": 145, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 145, "usage_type": "name"}, {"api_name": "openerp.api.one", "line_number": 169, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 169, "usage_type": "name"}, {"api_name": "openerp.api.depends", "line_number": 171, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 171, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 181, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 181, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 185, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 185, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 210, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 210, 
"usage_type": "name"}, {"api_name": "openerp.api.constrains", "line_number": 202, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 202, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 221, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 221, "usage_type": "name"}, {"api_name": "openerp.api.constrains", "line_number": 214, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 214, "usage_type": "name"}]} +{"seq_id": "461488868", "text": "\nimport os\nimport random\nimport pygame as pg\n\nfrom game.game import Game\n\nclass Loader:\n def __init__(self, base, sound_capable):\n self.base_dir = base\n self.sound_capable = sound_capable\n\n def load_image(self, path):\n print(\"load_image: %s\" % path)\n img = pg.image.load(os.path.join(self.base_dir, path))\n img.set_colorkey((255, 0, 255))\n return img\n #return .convert_alpha(self.screen)\n\n def load_ttf(self, path, size):\n print(\"load_font: %s\" % path)\n fnt = pg.font.Font(os.path.join(self.base_dir, path), size)\n return fnt\n\n def load_wav(self, path):\n print(\"load_wav: %s\" % path)\n if not self.sound_capable:\n return None\n\n snd = pg.mixer.Sound(os.path.join(self.base_dir, path))\n return snd\n\nclass App:\n\n def __init__(self):\n random.seed()\n print(\"pygame ver=%s\"%pg.version.ver)\n pg.init()\n pg.font.init()\n\n pg.mixer.init(22050, 16, 2)\n self.sound_capable = bool(pg.mixer.get_init())\n print(\"sound: %s\"%self.sound_capable)\n \n self.loader = Loader(os.getcwd() + \"/data\", self.sound_capable)\n icon = self.loader.load_image(\"icon-32.png\")\n pg.display.set_icon(icon)\n\n self.screen = pg.display.set_mode([800, 600])\n self.repaint()\n self.callback_count = 0\n self.callback_code = None\n pg.display.set_caption('Spidee')\n\n def draw_text( self, font, x, y, text, color ):\n text_surface = font.render(text, True, color)\n self.screen.blit(text_surface, (x, y))\n\n def repaint(self):\n self.do_repaint = True\n\n def set_callback(self, count, code):\n self.callback_count = count\n self.callback_code = code\n\n def main(self):\n\n g = Game(self)\n g.resize()\n g.reset()\n\n while(True):\n pg.time.delay(10)\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n return \n\n elif event.type == pg.MOUSEMOTION:\n g.mouse_move(event.pos[0], event.pos[1])\n\n elif event.type == pg.MOUSEBUTTONDOWN:\n g.mouse_down()\n\n elif event.type == pg.MOUSEBUTTONUP:\n g.mouse_up()\n\n elif event.type == pg.KEYDOWN:\n g.key_down(event.key)\n\n if self.callback_code:\n if self.callback_count > 0:\n self.callback_count -= 1\n else: \n code = self.callback_code \n self.callback_code = None\n code()\n \n \n if self.do_repaint:\n self.do_repaint = False\n g.draw()\n pg.display.flip()\n\nif __name__ == \"__main__\": App().main()\n", "sub_path": "Spidee.py", "file_name": "Spidee.py", "file_ext": "py", "file_size_in_byte": 2880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.version", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.mixer.get_init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.display.set_icon", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 53, "usage_type": "attribute"}, {"api_name": "game.game.Game", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 103, "usage_type": "attribute"}]} +{"seq_id": "353277712", "text": "import fbe\nimport proto\nimport timeit\nfrom proto import proto\n\n\nclass BenchmarkSerialization(object):\n\n def __init__(self):\n # Create a new account with some orders\n self.account = proto.Account(1, \"Test\", proto.State.good, proto.Balance(\"USD\", 1000.0), proto.Balance(\"EUR\", 100.0))\n self.account.orders.append(proto.Order(1, \"EURUSD\", proto.OrderSide.buy, proto.OrderType.market, 1.23456, 1000.0))\n self.account.orders.append(proto.Order(2, \"EURUSD\", proto.OrderSide.sell, proto.OrderType.limit, 1.0, 100.0))\n self.account.orders.append(proto.Order(3, \"EURUSD\", proto.OrderSide.buy, proto.OrderType.stop, 1.5, 10.0))\n\n # Serialize the account to the FBE stream\n self.writer = proto.AccountModel(fbe.WriteBuffer())\n self.writer.serialize(self.account)\n assert self.writer.verify()\n\n # Deserialize the account from the FBE stream\n self.reader = proto.AccountModel(fbe.ReadBuffer())\n self.reader.attach_buffer(self.writer.buffer)\n assert self.reader.verify()\n 
self.reader.deserialize(self.account)\n\n def serialize(self):\n # Reset FBE stream\n self.writer.reset()\n\n # Serialize the account to the FBE stream\n self.writer.serialize(self.account)\n\n def deserialize(self):\n # Deserialize the account from the FBE stream\n self.reader.deserialize(self.account)\n\n def verify(self):\n # Verify the account\n self.writer.verify()\n\n\nbenchmark = BenchmarkSerialization()\niterations = 100000\n\n\ndef report(name, duration):\n print()\n print(\"Phase: {}\".format(name))\n print(\"Average time: {:.3f} mcs / iteration\".format(duration / iterations * 1000000))\n print(\"Total time: {:.3f} s\".format(duration))\n print(\"Total iterations: {}\".format(iterations))\n print(\"Iterations throughput: {:.3f} / second\".format(iterations / duration))\n print()\n\n\ndef main():\n # Benchmark verify() method\n times = timeit.repeat(setup='print(\"Benchmarking verify() method...\")', stmt=\"benchmark.verify()\", repeat=5, number=iterations, globals=globals())\n duration = min(times)\n report(\"Verify\", duration)\n\n # Benchmark serialize() method\n times = timeit.repeat(setup='print(\"Benchmarking serialize() method...\")', stmt=\"benchmark.serialize()\", repeat=5, number=iterations, globals=globals())\n duration = min(times)\n report(\"Serialize\", duration)\n\n # Benchmark deserialize() method\n times = timeit.repeat(setup='print(\"Benchmarking deserialize() method...\")', stmt=\"benchmark.deserialize()\", repeat=5, number=iterations, globals=globals())\n duration = min(times)\n report(\"Deserialize\", duration)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "projects/Python/benchmarks/benchmark_serialization.py", "file_name": "benchmark_serialization.py", "file_ext": "py", "file_size_in_byte": 2715, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "proto.proto.Account", "line_number": 11, "usage_type": "call"}, {"api_name": "proto.proto", "line_number": 11, "usage_type": "name"}, {"api_name": "proto.proto.State", "line_number": 11, "usage_type": "attribute"}, {"api_name": "proto.proto.Balance", "line_number": 11, "usage_type": "call"}, {"api_name": "proto.proto.Order", "line_number": 12, "usage_type": "call"}, {"api_name": "proto.proto", "line_number": 12, "usage_type": "name"}, {"api_name": "proto.proto.OrderSide", "line_number": 12, "usage_type": "attribute"}, {"api_name": "proto.proto.OrderType", "line_number": 12, "usage_type": "attribute"}, {"api_name": "proto.proto.Order", "line_number": 13, "usage_type": "call"}, {"api_name": "proto.proto", "line_number": 13, "usage_type": "name"}, {"api_name": "proto.proto.OrderSide", "line_number": 13, "usage_type": "attribute"}, {"api_name": "proto.proto.OrderType", "line_number": 13, "usage_type": "attribute"}, {"api_name": "proto.proto.Order", "line_number": 14, "usage_type": "call"}, {"api_name": "proto.proto", "line_number": 14, "usage_type": "name"}, {"api_name": "proto.proto.OrderSide", "line_number": 14, "usage_type": "attribute"}, {"api_name": "proto.proto.OrderType", "line_number": 14, "usage_type": "attribute"}, {"api_name": "proto.proto.AccountModel", "line_number": 17, "usage_type": "call"}, {"api_name": "proto.proto", "line_number": 17, "usage_type": "name"}, {"api_name": "fbe.WriteBuffer", "line_number": 17, "usage_type": "call"}, {"api_name": "proto.proto.AccountModel", "line_number": 22, "usage_type": "call"}, {"api_name": "proto.proto", "line_number": 22, "usage_type": "name"}, {"api_name": "fbe.ReadBuffer", 
"line_number": 22, "usage_type": "call"}, {"api_name": "timeit.repeat", "line_number": 59, "usage_type": "call"}, {"api_name": "timeit.repeat", "line_number": 64, "usage_type": "call"}, {"api_name": "timeit.repeat", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "29781445", "text": "import tensorflow as tf\nimport numpy as np\nfrom skimage import io, transform\n\nimport os\nimport urllib\nimport glob\n\nSOURCE_URL = \"http://download.tensorflow.org/example_images/\"\n\n# 将所有的图片resize成100*100\nw = 100\nh = 100\nc = 3\n\ndef read_data_sets(data_dir = ''):\n path = '.\\\\data\\\\flower_photos'\n path = os.path.abspath(path)\n\n cate=[path + \"\\\\\" + x for x in os.listdir(path) if os.path.isdir(path + '\\\\' + x)]\n imgs = []\n labels = []\n for idx, folder in enumerate(cate):\n for im in glob.glob(folder + \"\\\\*.jpg\"):\n print('reading the images: %s' % (im))\n img = io.imread(im)\n img = transform.resize(img, (w, h))\n imgs.append(img)\n labels.append(idx)\n\n data = np.asarray(imgs, np.float32)\n label = np.asarray(labels, np.int32)\n \n data, label = upsetData(data, label)\n x_train, y_train, x_test, y_test = splitData(data, label)\n\n return x_train, y_train, x_test, y_test\n \n \ndef upsetData(data, label):\n num_example = data.shape[0]\n arr = np.arange(num_example)\n np.random.shuffle(arr)\n data = data[arr]\n label = label[arr]\n\n return data, label\n\ndef splitData(data, label):\n ratio = 0.8\n s = np.int(data.shape[0] * ratio)\n x_train = data[ :s]\n y_train = label[ :s]\n\n x_test = data[s: ]\n y_test = label[s: ]\n\n return x_train, y_train, x_test, y_test", "sub_path": "python/CNN-Flower/loadData.py", "file_name": "loadData.py", "file_ext": "py", "file_size_in_byte": 1397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 24, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 26, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 26, "usage_type": "name"}, {"api_name": "skimage.transform.resize", "line_number": 27, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "493378519", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 5 21:18:43 2017\n\n@author: ashirley\n\"\"\"\nimport sys\nimport numpy\nimport pandas\nimport sklearn.metrics\nimport sklearn.model_selection\nimport sklearn.linear_model\nimport sklearn.preprocessing\nimport matplotlib.pyplot as plt\n\nclass YY:\n def __init__(self, hat, test):\n self.test = test\n self.hat = 
hat\n\ndef load_train_test_data(train_ratio=.5):\n data = pandas.read_csv('./HTRU_2.csv', header = None)\n x = data.iloc[:, :8] \n y = data.iloc[:, 8]\n return sklearn.model_selection.train_test_split(x, y, test_size = 1 - train_ratio, random_state=0)\n\ndef scale_features(X_train, X_test, low=0, upp=1):\n minmax_scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(low, upp)).fit(numpy.vstack((X_train, X_test)))\n X_train_scale = minmax_scaler.transform(X_train)\n X_test_scale = minmax_scaler.transform(X_test)\n return X_train_scale, X_test_scale\n\ndef gradient_ascend(X, y, alpha = .001, iters = 100000, eps=.001):\n n, d = X.shape\n theta = numpy.matrix(numpy.zeros((d , 1)))\n X = numpy.matrix(X)\n y = numpy.transpose(numpy.matrix(y))\n yh = numpy.matrix(numpy.zeros((n , 1)))\n l = 1 #lambda \n for iter in range(iters):\n e = (-1) * X * theta \n yh = 1.0 / (1 + numpy.exp(e)) \n g = numpy.transpose(X) * (y - yh) - l/2\n diff = alpha * g\n theta = theta + diff \n k = max(abs(diff[j]) for j in range(d))\n if k < eps:\n return theta\n return theta\n\ndef roc(y):\n fpr= numpy.array([])\n tpr= numpy.array([])\n summ = sum(y[i].test for i in range(len(y)))\n n= float(len(y) - summ)\n p= float(summ)\n TP=0.0\n FP=0.0\n for i in range(len(y)):\n tpr = numpy.insert(tpr, 0, TP/p)\n fpr = numpy.insert(fpr, 0, FP/n)\n TP = TP + y[i].test\n FP = i - TP \n return fpr, tpr\n\ndef predict(X, theta):\n e = (-1) * X * theta \n y = 1 / (1 + numpy.exp(e))\n return y\n\ndef main(argv):\n X_train, X_test, y_train, y_test = load_train_test_data(train_ratio=.5)\n X_train_scale, X_test_scale = scale_features(X_train, X_test, 0, 1)\n X_train_scale = numpy.concatenate((numpy.ones((len(X_train_scale),1)), X_train_scale), axis = 1)\n X_test_scale = numpy.concatenate((numpy.ones((len(X_test_scale),1)), X_test_scale), axis = 1)\n theta = gradient_ascend(X_train_scale, y_train)\n y_hat = predict(X_test_scale, theta)\n y_test = list(y_test)\n y_hat.tolist()\n \n y =[]\n for i in range(len(y_test)):\n y.append(YY(y_hat[i], y_test[i]))\n y.sort(key = lambda y: y.hat, reverse = True) \n fpr, tpr = roc(y)\n plt.plot(fpr, tpr)\n plt.show()\n\nif __name__ == \"__main__\":\n main(sys.argv)", "sub_path": "logreg_104502535.py", "file_name": "logreg_104502535.py", "file_ext": "py", "file_size_in_byte": 2741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.metrics.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.metrics.model_selection", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sklearn.metrics", "line_number": 25, "usage_type": "name"}, {"api_name": "sklearn.metrics.preprocessing.MinMaxScaler", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.metrics.preprocessing", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sklearn.metrics", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 38, "usage_type": "call"}, {"api_name": 
"numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 90, "usage_type": "attribute"}]} +{"seq_id": "475157041", "text": "from django.db import models\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\n# Create your models here.\nclass BanList(models.Model):\n plain_type = models.CharField(\n _(\"Type\"),\n max_length=64,\n help_text=_(\"Ban type, e.x. can not \\\n login beacuse of being banned as RUSSIAN\"),\n default='default'\n )\n reason = models.CharField(\n _('reason'), blank=True, null=True,\n max_length=4096\n )\n created_on = models.DateTimeField(\n _('created on'), auto_now_add=datetime.now)\n updated_on = models.DateTimeField(\n _('updated on'), auto_now=datetime.now\n )\n owner = models.ForeignKey(User)\n\n class Meta:\n abstract = True\n\n\nclass UserBanList(BanList):\n nickname = models.CharField(\n _(\"Nickname\"), help_text=_(\"Nickname of banned user\"),\n max_length=512,\n )\n\n class Meta:\n ordering = ['created_on', ]\n\n\nclass ServerBanList(BanList):\n ip_address = models.IPAddressField(\n _('ip address'), help_text=_(\"Server ip address\"))\n server_name = models.CharField(\n _('server name'), max_length=512, blank=True, null=True)\n\n class Meta:\n ordering = ['created_on', ]\n", "sub_path": "apps/banlist/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1294, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 9, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", 
"line_number": 19, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models.IPAddressField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "628495037", "text": "#!/usr/bin/env python3\nimport pycparser.c_ast\nimport pycparser.c_parser\nimport copy\nimport sys\nimport pickle\nimport os\n\nsys.dont_write_bytecode = True #we don't want these .pyc files!\n\nfrom helper_functions_by_pycparser import *\nfrom our_helper_functions import *\n\ngive_small_output=False\n\nif len(sys.argv)!=2:\n\tprint(\"Usage: \"+sys.argv[0]+\" \")\n\tsys.exit(-1)\n\nast = pycparser.parse_file(sys.argv[1],use_cpp=True)\n\n#ast.show()\n\ndictify = lambda n: {k:v for k,v in n.children()}\nlistify = lambda n: [v for _,v in n.children()] if n else []\n\ndef access(node, *path):\n\ttmp = node\n\tfor label in path:\n\t\ttry:\n\t\t\ttmp = dictify(tmp)[label]\n\t\texcept KeyError:\n\t\t\treturn None\n\treturn tmp\n\ndef typetorepr(node, word_size=8,**kwargs):\n\tast_of_last_decl=kwargs.get(\"ast_of_last_decl\",None) #get the ast of the last Decl/TypeDecl/Typename\n\tast_of_last_proper_Decl=kwargs.get(\"ast_of_last_proper_Decl\",None) #get the ast of the last Decl (only)\n\tast_of_last_decl_init=kwargs.get(\"ast_of_last_decl_init\",None) #get the ast of the last Decl init block\n\tparent_node=kwargs.get(\"parent_node\",None) #grab a pointer to our parent node\n\n\tkwargs[\"parent_node\"]=node #set ourselves as the parent node\n\t\n\tif (isinstance(node,pycparser.c_ast.TypeDecl) #NOT A GOOD WAY to check, we need to do something better than this\n\t\tand isinstance(node.type,pycparser.c_ast.IdentifierType)):\n\t\t#the idea here is that we need to transfer the init block from the last Decl. 
We should cancel what we got for our children, EXCEPT if we are a TypeDecl (and followed by an Identifier)\n\t\tpass\t\n\telse:\n\t\tkwargs[\"ast_of_last_decl_init\"]=None # cancel the last decl init\n\n\tif isinstance(node, pycparser.c_ast.ArrayDecl):\n\t\tty, sz = typetorepr(node.type, word_size,**kwargs)\n\t\t#get the dimension of the array\n\t\tif RepresentsInt(get_original_C_code_of_ast(node.dim)):\n\t\t\tn = int(node.dim.value, 10)\n\t\telse:\n\t\t\tn = -42424242424242 #denotes \"variable size\"\n\t\tif (give_small_output):\n\t\t\treturn (['array', (ty, sz), n], n*sz)\n\t\telse:\n\t\t\treturn (['array',\n\t\t\t\t\t{\"type\":\"array\", \"name\":get_name_of_a_node(parent_node) , \"type_of_array_element\":(ty, sz), \"num_of_array_elements:\":n, \"size\":n*sz , \"init\":ast_of_last_decl_init,\"pycparser_ast\":copy.deepcopy(node), \"parent_node\":parent_node, \"ast_of_last_proper_Decl\":ast_of_last_proper_Decl}],\n\t\t\t\t\tn*sz)\n\t\n\tif isinstance(node, pycparser.c_ast.Struct):\n\t\ttypes = []\n\t\tsize = 0\n\n\t\tif node.decls==None and kwargs[\"in_struct_types\"]==True and kwargs[\"name_of_struct_in_which_we_are\"]==node.name: #probably we are in a pointer to the same struct type\n\t\t\tsize=kwargs[\"size_of_struct_so_far\"]+kwargs[\"num_of_elements_of_struct_left\"]*8 #assume everything at the end of the struct are pointers\n\t\t\ttypes=[]\n\n\t\tif node.decls!=None: #a struct that has already been declared and referenced again (with \"struct a b;\") does not have declarations\n\t\t\tfor i,decl in enumerate(node.decls):\n\t\t\t\tkwargs[\"in_struct_types\"]=True\n\t\t\t\tkwargs[\"name_of_struct_in_which_we_are\"]=node.name\n\t\t\t\tkwargs[\"size_of_struct_so_far\"]=size\n\t\t\t\tkwargs[\"num_of_elements_of_struct_left\"]=len(node.decls)-i\n\t\t\t\tty, sz = typetorepr(decl,**kwargs)\n\t\t\t\tkwargs[\"in_struct_types\"]=False\n\t\t\t\ttypes.append((ty, sz))\n\t\t\t\tsize += sz\n\t\telse:\n\t\t\t#if the struct is already present in the known structs, fetch the size and elements from there\n\t\t\tif node.name in all_structs_dict:\n\t\t\t\tsize=all_structs_dict[node.name][1]\n\t\t\t\ttypes=copy.deepcopy(all_structs_dict[node.name][0][1]['struct_elements'])\n\t\tif (give_small_output):\n\t\t\tretval=(['struct', node.name, types], size)\n\t\telse:\n\t\t\tretval= (['struct',\n\t\t\t\t\t{\"type\":\"struct\", \"name\":node.name, \"name_of_struct_variable\":get_name_of_a_node(parent_node), \"size\":size, \"init\":ast_of_last_decl_init, \"struct_elements\":types ,\"pycparser_ast\":copy.deepcopy(node), \"parent_node\":parent_node, \"ast_of_last_proper_Decl\":ast_of_last_proper_Decl}], \n\t\t\t\t\tsize)\n\t\tif (node.name not in all_structs_dict) and node.decls!=None: #node.decls!=None in order to not insert the pointer (inside the struct) to the struct of the same type into the dict\n\t\t\tstruct_dict=copy.deepcopy(retval)\n\t\t\tstruct_dict[0][1].pop(\"name_of_struct_variable\", None) #delete \"name_of_struct_variable\" as it is not relevant to a struct description\n\t\t\tall_structs_dict[node.name]=struct_dict #add it to the all structs dict if it is not there.\n\t\treturn retval\n\n\tif isinstance(node, pycparser.c_ast.PtrDecl):\n\t\tif (give_small_output):\n\t\t\treturn (['pointer', typetorepr(node.type,**kwargs)], word_size)\n\t\telse:\n\t\t\treturn (['pointer',\n\t\t\t\t\t{\"type\":\"pointer\", \"name\":get_name_of_a_node(parent_node) , \"type_of_pointed_element\":typetorepr(node.type,**kwargs), \"size\":word_size , \"init\":ast_of_last_decl_init, \"pycparser_ast\":copy.deepcopy(node), 
\"parent_node\":parent_node, \"ast_of_last_proper_Decl\":ast_of_last_proper_Decl}],\n\t\t\t\t\tword_size)\n\n\tif isinstance(node, pycparser.c_ast.IdentifierType):\n\t\tname = \" \".join(node.names)\n\t\tsize = get_size_of_type(name,typedefs)\n\t\tif (give_small_output):\n\t\t\treturn (name, size)\n\t\telse:\n\t\t\treturn ([name,\n\t\t\t\t\t{\"type\":name, \"name\":get_name_of_a_node(parent_node),\"size\":size , \"init\":ast_of_last_decl_init, \"pycparser_ast\":copy.deepcopy(node), \"parent_node\":parent_node, \"ast_of_last_proper_Decl\":ast_of_last_proper_Decl}],\n\t\t\t\t\tsize)\n\n\tif isinstance(node, pycparser.c_ast.FuncDecl):\n\t\tty = node.type\n\t\treturn_value_parse=typetorepr(node.type, word_size,**kwargs)\n\t\tlist_with_arguments_parse=[typetorepr(arg,**kwargs) for arg in listify(node.args)]\n\t\tif (give_small_output):\n\t\t\treturn (['function',list_with_arguments_parse , return_value_parse], None)\n\t\telse:\n\t\t\treturn (['function decl',\n\t\t\t\t\t{\"type\":\"function decl\", \"name\":get_name_of_a_node(parent_node) , \"list_of_arguments\":list_with_arguments_parse, \"return_value\":return_value_parse , \"pycparser_ast\":copy.deepcopy(node), \"parent_node\":parent_node, \"ast_of_last_proper_Decl\":ast_of_last_proper_Decl}],\n\t\t\t\t\tNone)\n\t\t\n\tif isinstance(node, (pycparser.c_ast.Decl, pycparser.c_ast.TypeDecl, pycparser.c_ast.Typename)):\n\t\tkwargs[\"ast_of_last_decl\"]=node\n\t\tif isinstance(node, (pycparser.c_ast.Decl)):\n\t\t\tkwargs[\"ast_of_last_proper_Decl\"]=node\n\t\t\tkwargs[\"ast_of_last_decl_init\"]=copy.deepcopy(node.init)\n\t\tretval=typetorepr(node.type,**kwargs)\n\t\tif isinstance(node, (pycparser.c_ast.Decl)):\n\t\t\tdecls_to_gather.append(retval)\n\t\treturn retval\n\t\n\tnode.show()\n\tassert False, 'Unhandled type %r %r %r' % (type(node), dictify(node), dir(node))\n\nfunction_types = dict()\nglobal_decls=[]\nkwargs=dict()\ndecls_to_gather=[]\ntypedefs=dict()\nall_structs_dict=dict()\nglobal_decl_names=[]\n\nfor node in listify(ast):\n\tkwargs[\"parent_node\"]=node\n\tkwargs[\"in_struct_types\"]=False\n\tif isinstance(node, pycparser.c_ast.Typedef):\n\t\tname_of_typedef=node.name\t\t\n\t\ttypedefs[name_of_typedef]=typetorepr(node.type,**kwargs)\n\tif isinstance(node, pycparser.c_ast.Decl):\n\t\t(ty,sz)=typetorepr(node,**kwargs)\n\t\tglobal_decls.append((ty,sz))\n\t\tglobal_decl_names.append(node.name)\n\t\tprint (node.name)\n\t\tprint ('\\t', (ty,sz))\n\tif isinstance(node, pycparser.c_ast.FuncDef):\n\t\tname_of_fun=node.decl.name\n\t\tdecls_to_gather=[]\n\t\tlocal_vars=[typetorepr(local,**kwargs) for local in node.body.block_items if isinstance(local, (pycparser.c_ast.Decl))]\n\t\tfunction_types[name_of_fun] = {\"fun_decl\":typetorepr(node.decl,**kwargs), \"fun_locals\":local_vars }\n\t\tprint(name_of_fun)\n\t\tprint ('\\t', function_types[name_of_fun])\n\n\nprint(\"\\n\\nFUNCTIONS:\\n\")\nif (give_small_output):\n\tprint(function_types)\nelse:\n\tfor x in function_types:\n\t\tprint(\"\\n\",x,\" : \")\n\t\tprint(\"\\t\\t function decl:\",function_types[x][\"fun_decl\"])\n\t\tprint(\"\\t\\t function locals:\")\n\t\tfor y in function_types[x][\"fun_locals\"]:\n\t\t\tprint(\"\\t\\t\\t\\t\",y[0][1]['name'],\" : \",y)\nprint(\"\\nGLOBAL DECLS:\\n\")\nif (give_small_output):\n\tprint(global_decls)\nelse:\n\tfor i,x in enumerate(global_decls):\n\t\tprint(global_decl_names[i],\" : \",x)\nprint(\"\\nTYPEDEFS:\\n\")\nif (give_small_output):\n\tprint(typedefs)\nelse:\n\tfor x in typedefs:\n\t\tprint(x,\" : \",typedefs[x])\nprint(\"\\nALL STRUCTS 
DICT:\\n\")\nif (give_small_output):\n\tprint(all_structs_dict)\nelse:\n\tfor x in all_structs_dict:\n\t\tprint(x,\" : \",all_structs_dict[x])\n\nsemantic_dict=dict()\nsemantic_dict[\"functions\"]=function_types\nsemantic_dict[\"global_decls\"]=global_decls\nsemantic_dict[\"typedefs\"]=typedefs\nsemantic_dict[\"all_structs\"]=all_structs_dict\nsemantic_dict[\"global_decl_names\"]=global_decl_names\nworking_dir=os.getcwd()\npickle.dump( semantic_dict, open( working_dir+\"/semantic_data\", \"wb\" ) )\n\n", "sub_path": "code/pycparser_scripts/scrape_function_metadata.py", "file_name": "scrape_function_metadata.py", "file_ext": "py", "file_size_in_byte": 8400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.dont_write_bytecode", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 18, "usage_type": "call"}, {"api_name": "pycparser.c_ast.parse_file", "line_number": 20, "usage_type": "call"}, {"api_name": "pycparser.c_ast", "line_number": 20, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 44, "usage_type": "name"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 45, "usage_type": "name"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 51, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 62, "usage_type": "call"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 65, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 87, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 92, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 95, "usage_type": "call"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 100, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 105, "usage_type": "call"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 108, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 115, "usage_type": "call"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 118, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 126, "usage_type": "call"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 129, "usage_type": "name"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 131, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 133, "usage_type": "call"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 135, "usage_type": "name"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 153, 
"usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 153, "usage_type": "name"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 156, "usage_type": "name"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 162, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 162, "usage_type": "name"}, {"api_name": "pycparser.c_ast.c_ast", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pycparser.c_ast", "line_number": 165, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 206, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 207, "usage_type": "call"}]} +{"seq_id": "490094540", "text": "from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef upload():\n return render_template('upload.html')\n\n@app.route('/check', methods=['POST'])\ndef check():\n if request.method == 'POST':\n global result\n result= request.files['voice']\n return result.filename\n #return render_template('check.html',result = result)\n\nif __name__ == '__main__':\n app.run(host = '127.0.0.1', port=8080, debug=True)", "sub_path": "flask_back/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 468, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "31482825", "text": "import sys\nimport json\nimport threading\n\nfrom twisted.internet import reactor\nfrom autobahn.twisted.websocket import WebSocketClientFactory, \\\n WebSocketClientProtocol, \\\n connectWS\n\nbuzzed = threading.Event()\n\nclass BroadcastClientProtocol(WebSocketClientProtocol):\n\n def sendHello(self):\n self.sendMessage(json.dumps(False).encode('utf8'))\n\n def onOpen(self):\n self.sendHello()\n\n def onMessage(self, payload, isBinary):\n global buzzed\n if not isBinary:\n print(\"Text message received: {}\".format(payload.decode('utf8')))\n if buzzed:\n print('+++')\n\nclass KeypressPoller(threading.Thread):\n\n def run(self):\n global buzzed\n ch = sys.stdin.read(1)\n if ch == 'b':\n buzzed.set()\n else:\n buzzed.clear()\n\nif __name__ == '__main__':\n poller = KeypressPoller()\n poller.start()\n factory = WebSocketClientFactory(\"ws://127.0.0.1:9000\")\n factory.protocol = BroadcastClientProtocol\n connectWS(factory)\n reactor.run()\n", "sub_path": "broadcast/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "threading.Event", "line_number": 10, "usage_type": "call"}, {"api_name": "autobahn.twisted.websocket.WebSocketClientProtocol", "line_number": 12, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.stdin.read", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 31, "usage_type": 
"attribute"}, {"api_name": "autobahn.twisted.websocket.WebSocketClientFactory", "line_number": 40, "usage_type": "call"}, {"api_name": "autobahn.twisted.websocket.connectWS", "line_number": 42, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.run", "line_number": 43, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "578318378", "text": "#Biblioteca necessária pra fazer a request na API\n#Baixe ela com o comando pip install requests\nimport requests \n\n#Aqui fazemos a requisição pra API e já passamos para JSON\ngames = requests.get(\"https://worldcup.sfg.io/matches/current\").json()\n\n#Nesse loop pegamos cada jogo da requisição acima e guardamos na variável \"game\"\nfor game in games:\n #Se o \"status\" desse jogo for \"completed\".\n #Mude pra \"in progress\" senão só vai ver os jogos já terminados!\n if(game['status'] == \"in progress\"):\n #if(game['stage_name'] == \"First stage\"):\n \n \n # ht = game['home_team_country']\n # at = game['away_team_country']\n # hg = game['home_team']['goals']\n #ag = game['away_team']['goals']\n venue = game['venue']\n \n \n #Salvando as informações em variáveis para ficar mais facil de entender\n home_team = game['home_team']['code']\n home_score = game['home_team']['goals']\n away_team = game['away_team']['code']\n away_score = game['away_team']['goals']\n celsius = game['weather']['temp_celsius']\n \n #Print do resultado dos jogos\n \n print(\"{} {} X {} {}\".format(home_team, home_score, away_team, away_score))\n\n # print(\"Temperatura = {} Estadio = {} TIMES : {} {} X {} {}\".format(celsius, venue, home_team, home_score, away_team, away_score))\n # print(\"{} : {} X {} : {} IN {}\".format(ht, hg, at, ag, venue))\n\ninput(\"Pressione qualquer tecla para sair...\")\n", "sub_path": "Copa/copa5(inprogress).py", "file_name": "copa5(inprogress).py", "file_ext": "py", "file_size_in_byte": 1501, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "28957416", "text": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\nimport pickle\nimport time\nimport datetime\n\ndebug = False\n\nDISTORTION_COEFF = None\nCAMERA_MATRIX = None\n\ndef calibrateCamera (calibrationImgLocation, chessBoardSizeX, chessBoardSizeY, forceRecalibrate):\n global CAMERA_MATRIX, DISTORTION_COEFF\n\n if (forceRecalibrate == False):\n try:\n dist_pickle = pickle.load(open(\"./my_cached_data/calibration_data.p\", \"rb\"))\n TIME = dist_pickle[\"TIME\"]\n CAMERA_MATRIX = dist_pickle[\"CAMERA_MATRIX\"]\n DISTORTION_COEFF = dist_pickle[\"DISTORTION_COEFF\"]\n if debug: print(\"Loaded prior calibration data from \" + TIME + \". Send forceRecalibrate=True for recomputation.\")\n return\n except:\n print(\"Could not load from prior calibration data. 
Recomputing calibration...\")\n pass\n\n calibrationImages = glob.glob(calibrationImgLocation + \"/calibration*.jpg\")\n objPoints = [] # 3D points in real world space\n imgPoints = [] # 2D points in image space\n objp = np.zeros((chessBoardSizeX*chessBoardSizeY, 3), np.float32)\n objp[:, :2] = np.mgrid[0:chessBoardSizeX, 0:chessBoardSizeY].T.reshape(-1, 2) # x, y, coordinates\n\n for fname in calibrationImages:\n print(\"Processing calibration image: \" + fname)\n\n # read the image\n img = mpimg.imread(fname)\n\n # convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # find chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (chessBoardSizeX, chessBoardSizeY), None)\n\n if ret == True:\n imgPoints.append(corners)\n objPoints.append(objp)\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objPoints, imgPoints, gray.shape[::-1], None, None)\n CAMERA_MATRIX = mtx\n DISTORTION_COEFF = dist\n\n print (\"Saving distortion coefficients and matrix for future calculations...\")\n dist_pickle = {\"TIME\":datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), \"CAMERA_MATRIX\":CAMERA_MATRIX, \"DISTORTION_COEFF\": DISTORTION_COEFF}\n pickle.dump(dist_pickle, open(\"./my_cached_data/calibration_data.p\", \"wb\"))\n\n\n# Performs image distortion correction based on the computed distortion coefficients and camera matrix\n# returns the undistorted image\ndef undistortImage (img):\n if debug: print(\"Undistorting with... \" + str(DISTORTION_COEFF))\n\n undist = cv2.undistort(img, CAMERA_MATRIX, DISTORTION_COEFF, None, CAMERA_MATRIX)\n\n if debug:\n plt.imshow(undist)\n plt.show()\n\n return undist\n\ndef printInfo():\n print (CAMERA_MATRIX)", "sub_path": "camera_calibrate.py", "file_name": "camera_calibrate.py", "file_ext": "py", "file_size_in_byte": 2687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pickle.load", "line_number": 20, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.mgrid", "line_number": 34, "usage_type": "attribute"}, {"api_name": "matplotlib.image.imread", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 40, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.findChessboardCorners", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "195291816", "text": "from sklearn.ensemble import 
RandomForestClassifier\n\nclass Domain:\n def __init__(self, _name, _label):\n self.name = _name\n self.label = _label\n self.domain_len = len(_name)\n sumofnum = 0\n for c in _name:\n if c.isdigit():\n sumofnum += int(c)\n self.sum_of_num = sumofnum\n\n def returnData(self):\n return [self.domain_len, self.sum_of_num]\n\n def returnLabel(self):\n if self.label == \"notdga\":\n return 0\n else:\n return 1\n\ndef initTrainData(filename):\n domainlist = []\n with open(filename) as f:\n for line in f:\n line = line.strip()\n if line.startswith(\"#\") or line == \"\":\n continue\n tokens = line.split(\",\")\n name = tokens[0]\n label = tokens[1]\n domainlist.append(Domain(name, label))\n return domainlist\n\n\ndef initTestData(filename):\n domainlist = []\n featurelist = []\n with open(filename) as f:\n for line in f:\n line = line.strip()\n if line.startswith(\"#\") or line == \"\":\n continue\n sum_of_num = 0\n for c in line:\n if c.isdigit():\n sum_of_num += int(c)\n testData = [len(line), sum_of_num]\n domainlist.append(line)\n featurelist.append(testData)\n return domainlist, featurelist\n\ndef output(content, filename):\n with open(filename, \"w\") as f:\n for i in content:\n f.write(i + \"\\n\")\n\ndef main():\n trainSet = initTrainData(\"train.txt\")\n testDomainName, testFeatureSet = initTestData(\"test.txt\")\n featureMatrix = []\n labelList = []\n for item in trainSet:\n featureMatrix.append(item.returnData())\n labelList.append(item.returnLabel())\n clf = RandomForestClassifier(random_state=0)\n clf.fit(featureMatrix, labelList)\n classification = clf.predict(testFeatureSet)\n result = []\n for i in range(len(classification)):\n if classification[i] == 1:\n result.append(testDomainName[i] + \",dga\")\n else:\n result.append(testDomainName[i] + \",notdga\")\n output(result, \"result.txt\")\n\nif __name__ == '__main__':\n main()\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2260, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "106780758", "text": "import discord, os, random, time, json\nfrom discord import Embed, Colour, Member, User\nfrom discord.ext import commands\nfrom typing import Union\n\nconfig = json.load(open('config.json'))\nembed_toggle= config[\"embed_toggle\"]\n\nclient = commands.Bot(command_prefix=config[\"prefix\"], pm_help=True, owner_id=702954010008748174, case_insensitive=True)\n\nclient.remove_command(\"help\")\nclient._uptime = None\n\n@client.event\nasync def on_connect():\n\tif client._uptime is None:\n\t\tprint(f\"Connected to Discord. Getting ready...\")\n\t\tprint(f'-----------------------------')\n\n@client.event\nasync def on_ready():\n await client.change_presence(activity=discord.Game(name=\"DM fire#7010 for a custom bot\"))\n\n@client.command(usage=\"Learn how to join a role\")\nasync def How_do_I_join_Xetiq(ctx):\n join=\"\"\n join+=\"1. What server are you \\n\"\n join+=\"-NAE \\n\"\n join+=\"-NAW \\n\"\n join+=\"-EU \\n\"\n join+=\"-ASIA \\n\"\n join+=\"-OCE \\n \\n\"\n join+=\"2. What do you want to join as \\n \\n\"\n join+=\"Creative Warrior: U need to know how to do retakes and edit well. \\n \\n\"\n join+=\"Comp Player: U need at least 1k in arena. 
Play a lot of solos/duos/squads \n \n\"\n join+=\"TrickShotter: Need to do good trickshots or insane trickshots to join as a Trickshotter \n \n\"\n join+=\"Content Creator: U need at least 100+ subs to be a content creator \n \n\"\n join+=\"GFX/VFX: U have to make good work and dm staff or owners ur work \n \n\"\n join+=\"And that's how u Join Xetiq so try to join this clan and be a part of it with the members in the clan!\"\n if(embed_toggle=='0'):\n await ctx.send(join)\n if(embed_toggle=='1'):\n join_embed=discord.Embed(color=0x0000, title=\"How to join\", description=join)\n join_embed.set_footer(text=\"DM fire#7010 for a custom bot\") \n await ctx.send(embed=join_embed)\n\n \n@client.command(usage=\"Gives help about commands\")\nasync def help(ctx):\n help= \"**\"\n for command in client.commands:\n help+=f\"{command}- `{command.usage}`\\n\"\n help+=\"**\" \n if(embed_toggle=='0'):\n await ctx.send(help)\n if(embed_toggle=='1'):\n help_embed=discord.Embed(color=0x0000, title=\"My Commands\", description=help)\n help_embed.set_footer(text=\"DM fire#7010 for a custom bot\") \n help_embed.set_thumbnail(url='https://image.ibb.co/caM2BK/help.gif')\n help_embed.set_image(url='https://media.giphy.com/media/OkJat1YNdoD3W/giphy.gif')\n\n await ctx.send(embed=help_embed)\n \n@client.command(hidden=True)\nasync def Congratulations(ctx, *, arg=None):\n if(ctx.message.author.id==702954010008748174):\n if arg == None:\n if(embed_toggle=='0'):\n await ctx.send(\"Error: Please specify who you want to congratulate\")\n if(embed_toggle=='1'):\n Congratulations_embed_empty = discord.Embed(\n color=0xFF0000\n )\n Congratulations_embed_empty.add_field(name='Error', value=\"Please specify who you want to congratulate\", inline=False)\n Congratulations_embed_empty.set_footer(text=\"DM fire#7010 for a custom bot\") \n await ctx.send(embed=Congratulations_embed_empty)\n else:\n if(embed_toggle=='0'):\n await ctx.send(f\"{ctx.message.author.name}: Congratulates {arg} for joining the team\")\n if(embed_toggle=='1'):\n Congratulations_embed_full = discord.Embed(\n color=0x2ECC7\n )\n Congratulations_embed_full.add_field(name=f'{ctx.message.author.name}', value=f\"Congratulates {arg} for joining the team\", inline=False)\n Congratulations_embed_full.set_footer(text=\"DM fire#7010 for a custom bot\") \n await ctx.send(embed=Congratulations_embed_full)\n else:\n if(embed_toggle=='0'):\n await ctx.send(\"Error: You do not have permission to do this\")\n if(embed_toggle=='1'):\n Congratulations_embed_error = discord.Embed(\n color=0xFF0000\n )\n Congratulations_embed_error.add_field(name=\"Error\", value=\"You do not have permission to do this\")\n Congratulations_embed_error.set_footer(text=\"DM fire#7010 for a custom bot\") \n await ctx.send(embed=Congratulations_embed_error)\n\nfor filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n client.load_extension(f'cogs.{filename[:-3]}')\n\t\t\nclient.run(config[\"token\"])", "sub_path": "Bot.py", "file_name": "Bot.py", "file_ext": "py", "file_size_in_byte": 4178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 9, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 9, "usage_type": "name"}, {"api_name": "discord.Game", "line_number": 22, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.Embed", 
"line_number": 57, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 71, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 81, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 91, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "570458672", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 28 22:00:26 2016\n\n@author: Simon\n\"\"\"\n\nimport shapefile as sf\nimport csv\nimport pyproj\n\nregion = sf.Reader('AIM_BIKEWAY_SECTIONS_2015.shp')\ngeomet = region.shapeRecords()\n\n# prepare for geographical coordinate system conversion\ncoord_proj = pyproj.Proj(init = 'EPSG:28356')\n\n# prepare CSV file\nheaders = ['PathID', 'Path Order', 'Lat', 'Long', 'Street', 'Suburb', 'Material',\\\n 'Traffic Type', 'Path Length']\n\nrows = []\n\nfor geo in geomet:\n coords = geo.shape.points\n attrib = geo.record\n \n path_id = attrib[0]\n street = attrib[7]\n suburb = attrib[8]\n material = attrib[3]\n traffic_type = attrib[2]\n path_leng = attrib[11]\n \n count = 1\n for coord in coords:\n x = coord[0]\n y = coord[1]\n \n # convert\n long, lat = coord_proj(x,y,inverse = True)\n \n row = dict()\n row['PathID'] = path_id\n row['Path Order'] = count\n row['Lat'] = lat\n row['Long'] = long\n row['Street'] = street\n row['Suburb'] = suburb\n row['Material'] = material\n row['Traffic Type'] = traffic_type\n row['Path Length'] = path_leng\n rows.append(row)\n count += 1\n \n\n# Write CSV file\nwith open('bike-path.csv', 'w') as csvfile:\n fieldnames = headers\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for row in rows:\n writer.writerow(row)", "sub_path": "get-geo-data.py", "file_name": "get-geo-data.py", "file_ext": "py", "file_size_in_byte": 1462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "shapefile.Reader", "line_number": 12, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 16, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "55051663", "text": "from flask import request, session, current_app\nfrom flask_socketio import emit, join_room, leave_room\nfrom .. import socketio\n# from .. import scheduler\n# from .. import active_tasks as all_tasks\n# from .. import schedule_task\n# from .. 
import get_p4p\n\nfrom libs.CeleryTasks import tasks\nfrom datetime import datetime\nfrom libs.json import JSON\nimport os\nimport tkinter\nfrom tkinter import filedialog\n\nimport os\nimport re\nimport time\nimport pendulum\n\nfrom selenium import webdriver\n\nfrom libs.alibaba.alibaba import Alibaba\nfrom libs.crawlers.keywords_crawler_alibaba import KwclrAlibaba\nfrom libs.crawlers.keywords_crawler_ali_sr import KwclrAliSr\nfrom libs.crawlers.keywords_crawler_ali_sp import KwclrAliSp\nfrom libs.crawlers.keywords_crawler_amazon import KwclrAmazon\n\n\n@socketio.on('crawl_products_rankings', namespace='/markets')\ndef crawl_products_rankings(market, keywords):\n socketio.start_background_task(background_task_crawl_products_rankings, keywords, 1, socketio, '/markets', request.sid)\n\ndef background_task_crawl_products_rankings(kws, max_processes, socketio, ns, room):\n processes_count = 0\n records = []\n results = {}\n idx_kws = -1\n while True:\n while processes_count < max_processes and (idx_kws+1) < len(kws):\n idx_kws +=1\n kw = kws[idx_kws]\n kwargs={'keyword': kw, 'pages': 5}\n result = tasks.crawl_product_ranking.apply_async(kwargs=kwargs, queue='eyelash_products_ranking')\n results[kw] = result\n processes_count += 1\n \n print(kw, end=',')\n\n print()\n print('--------------')\n\n if processes_count == 0:\n break\n\n new_records = []\n finished_kws = []\n while True:\n for kw in results:\n result = results[kw]\n if result.ready():\n record = result.get()\n record['keyword'] = kw\n new_records.append(record)\n records.append(record)\n finished_kws.append(kw)\n processes_count -= 1\n \n for kw in finished_kws:\n results.pop(kw)\n if new_records:\n print('finished:', len(new_records), finished_kws)\n socketio.emit('crawl_products_rankings_finished_kws', finished_kws, namespace=ns, room=room)\n break\n else:\n time.sleep(1)\n socketio.emit('crawl_products_rankings_results', records, namespace=ns, room=room)\n\n@socketio.on('get_products_rankings', namespace='/markets')\ndef get_products_rankings(market, keywords):\n\n root = market['directory']+'_config'\n products_rankings_dir = root+'//'+'products_ranking'\n\n products_rankings = []\n for kw in keywords:\n file = products_rankings_dir + '//' + kw + '.json'\n\n if not os.path.isfile(file):\n continue\n\n obj = JSON.deserialize(root, 'products_ranking', file)\n obj['keyword'] = kw\n products_rankings.append(obj)\n return products_rankings\n\n@socketio.on('get_visitors', namespace='/markets')\ndef get_visitors(market, start_date='2018-08-01', end_date=None):\n\n start = pendulum.parse(start_date)\n if end_date:\n end = pendulum.parse(end_date)\n else:\n end = pendulum.now()\n\n root = market['directory']+'_config'\n visitor_dir = root+'//'+'visitors'\n files = os.listdir(visitor_dir)\n\n visitors = []\n for f in files:\n if not f.startswith('visitors_') or not f.endswith('.json'):\n continue\n\n dt = pendulum.parse(re.search('visitors_(.*).json', f).group(1))\n if start <= dt <= end:\n visitors += JSON.deserialize(root, 'visitors', f)\n return visitors\n\n@socketio.on('get_p4p_keywords_crawl_result_file_list', namespace='/markets')\ndef get_p4p_keywords_crawl_result_file_list(market):\n root = market['directory'] + '_config'\n if not os.path.exists(root):\n os.makedirs(root)\n return [n for n in os.listdir(root) if os.path.isfile(os.path.join(root, n)) and n.startswith('p4p_keywords_crawl_result_')]\n\n\n@socketio.on('connect', namespace='/markets')\ndef connect_products():\n pass\n\n\n@socketio.on('get_market', 
namespace='/markets')\ndef get_market(name):\n markets = JSON.deserialize('.', 'storage', 'markets.json')\n if name in markets:\n market = markets[name]\n else:\n market = None\n return market\n\n\n@socketio.on('update_market', namespace='/markets')\ndef update_market(market):\n markets = JSON.deserialize('.', 'storage', 'markets.json')\n key = market['name']\n if key in markets:\n markets[key] = market\n JSON.serialize(markets, '.', 'storage', 'markets.json')\n\n\n@socketio.on('add_market', namespace='/markets')\ndef add_market():\n root = tkinter.Tk()\n root.withdraw()\n path = filedialog.askdirectory(parent=root, initialdir=\"/\", title='Please select the product upload directory')\n if path:\n name = os.path.basename(path)\n path = path.replace('/', '\\\\')\n market = {'name': name, 'directory': path}\n markets = current_app.data.markets\n\n if market['name'] not in markets:\n markets[market['name']] = market\n JSON.serialize(markets, '.', 'storage', 'markets.json')\n return market\n else:\n msg = {'type': 'warning', 'content': 'The Market of '+market['name']+' is already in the system!'}\n emit('notify', msg, room=request.sid)\n return\n else:\n msg = {'type': 'primary', 'content': 'No market directory was selected.'}\n emit('notify', msg, room=request.sid)\n return\n\n@socketio.on('get_all_markets', namespace='/markets')\ndef get_all_markets():\n markets = JSON.deserialize('.', 'storage', 'markets.json')\n if not markets:\n markets = {}\n return markets\n\n\n@socketio.on('get_categories', namespace='/markets')\ndef get_categories(market):\n directory = market['directory']\n categories = [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))]\n sub_categories = {}\n for cat in categories:\n folder = os.path.join(directory, cat)\n sub_dirs = [n for n in os.listdir(folder) if os.path.isdir(os.path.join(folder, n))]\n sub_cat = []\n for name in sub_dirs:\n sub_folder = os.path.join(folder, name)\n ssub_folders = [n for n in os.listdir(sub_folder) if not name.lower().endswith(' serie') and os.path.isdir(os.path.join(sub_folder, n))]\n if(len(ssub_folders)>0):\n sub_cat.append(name)\n if(len(sub_cat)>0):\n sub_categories[cat] = sub_cat\n \n return {'categories': categories, 'sub_categories':sub_categories}\n\n\n@socketio.on('remove_market', namespace='/markets')\ndef remove_market(market):\n markets = JSON.deserialize('.', 'storage', 'markets.json')\n if market['name'] in markets:\n del markets[market['name']]\n JSON.serialize(markets, '.', 'storage', 'markets.json')\n return True\n else:\n msg = {'type':'warning', 'content': 'Market ' + market['name'] + ' was not found. 
Try Refreshing Your Browser!'}\n emit('notify', msg, room=request.sid)\n return False\n\n\n@socketio.on('get_p4p_records', namespace='/markets')\ndef get_p4p_records(market, paths, date_str):\n fn_keywords = 'p4p_keywords_crawl_result_' + date_str + '.json.gz'\n fn_balance = 'p4p_balance_change_history_'+date_str+'.json.gz'\n keywords = deserialize(market, paths, fn_keywords, True)\n balance = deserialize(market, paths, fn_balance, True)\n return [keywords, balance]\n\n\n@socketio.on('serialize', namespace='/markets')\ndef serialize(obj, market, paths, filename):\n root = market['directory']+'_config'\n JSON.serialize(obj, root, paths, filename)\n return\n\n\n@socketio.on('deserialize', namespace='/markets')\ndef deserialize(market, paths, filename, shallow=False):\n root = (market['directory']+'_config')\n\n if shallow:\n return JSON.deserialize(root, paths, filename)\n\n objects = []\n while True:\n objects.append(JSON.deserialize(root, paths[:], filename))\n\n if len(paths) == 0:\n break\n\n if len(paths) and paths[-1].lower().endswith(' serie') and '_' in filename:\n filename = filename.split('_')[1]\n else:\n paths.pop()\n\n return objects\n\n\n@socketio.on('get_file_list', namespace='/markets')\ndef get_file_list(market, paths):\n root = market['directory']\n path = os.path.join(root, *paths)\n return [n for n in os.listdir(path) if os.path.isfile(os.path.join(path, n))]\n\n\n@socketio.on('get_products', namespace='/markets')\ndef get_products(market, paths):\n root = market['directory']\n path = os.path.join(root, *paths)\n folders = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]\n files = {}\n for folder in folders:\n files[folder] = os.listdir(os.path.join(path, folder))\n\n attrs = {}\n root_config = (market['directory']+'_config')\n path_config = os.path.join(root_config, *paths)\n if os.path.exists(path_config):\n folders_config = os.listdir(path_config)\n for folder in folders_config:\n if not os.path.exists(os.path.join(path_config, folder)):\n continue\n if os.path.isfile(os.path.join(path_config, folder)):\n continue\n files_config = os.listdir(os.path.join(path_config, folder))\n for file in files_config:\n if not file.endswith('_attributes.json'):\n continue\n pid = file.split('_')[0]\n ps = paths[:]\n ps.append(folder)\n attrs[folder+'_'+pid] = JSON.deserialize(root_config, ps, file)\n\n return dict(folders=folders, files=files, attributes=attrs)\n\n\n@socketio.on('login_alibaba', namespace='/markets')\ndef login_alibaba(lid, lpwd):\n alibaba = current_app.data.alibaba\n if not alibaba:\n conn = [socketio, '/markets', request.sid]\n alibaba = Alibaba(lid, lpwd, socketio_connection=conn)\n current_app.data.alibaba = alibaba\n socketio.start_background_task(alibaba.login)\n\n\n@socketio.on('post_similar_products', namespace='/markets')\ndef post_similar_products(products, similar_product_id):\n alibaba = current_app.data.alibaba\n alibaba.room = request.sid\n socketio.start_background_task(backgound_post_similar_products, alibaba, products, similar_product_id)\n\n\ndef backgound_post_similar_products(alibaba, products, similar_product_id):\n start = time.time()\n counter = 0\n for product in products:\n spid = similar_product_id\n if not similar_product_id:\n spid = product['similar_ali_id']\n\n if not spid:\n msg = {'type': \"danger\", 'content': \"Cannot determine the Alibaba ID of the similar product\"}\n socketio.emit('notify', msg, namespace=alibaba.namespace, room=alibaba.room)\n return\n\n result = alibaba.post_similar_product(product, spid)\n\n if isinstance(result, Exception):\n 
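# posting failed: log the exception and continue with the remaining products\n            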
print(result)\n\n else:\n print('===============================================================')\n print(alibaba.namespace, alibaba.room)\n print(result)\n socketio.emit('product_posting', result, namespace=alibaba.namespace, broadcast=True, include_self=True)\n counter += 1\n\n socketio.emit('product_posting_finished', namespace=alibaba.namespace, room=alibaba.room)\n end = time.time()\n total = end - start\n hour = int(total // 3600)\n minu = int((total % 3600) // 60)\n sec = int(total - hour * 3600 - minu * 60)\n average = round(total / counter, 2) if counter else 0\n\n msg = {'type': \"primary\",\n 'content': \"共发布 \" + str(counter) + \" 款产品,用时 \" + str(hour) + \"小时 \" + str(minu) + \"分 \" + str(sec) + \"秒,平均:\" + str(average) + \"秒\"}\n socketio.emit('notify', msg, namespace=alibaba.namespace, room=alibaba.room)\n\n\n@socketio.on('crawl_product_data_from_alibaba', namespace='/markets')\ndef crawl_product_data_from_alibaba(ali_id):\n result_message = 'crawl_product_data_from_alibaba_result'\n alibaba = current_app.data.alibaba\n alibaba.room = request.sid\n socketio.start_background_task(alibaba.crawl_product_data, result_message, ali_id)\n\n\n@socketio.on('get_posted_product_info', namespace='/markets')\ndef get_posted_product_info(page_quantity):\n alibaba = current_app.data.alibaba\n alibaba.room = request.sid\n socketio.start_background_task(alibaba.get_posted_product_info, page_quantity)\n\n\n@socketio.on('get_products_data', namespace='/markets')\ndef get_products_data(market, products):\n for p in products:\n paths = p['categories'][:]\n paths.append(p['folder'])\n p['attributes_list'] = deserialize(market, paths[:], p['pid']+'_attributes.json')\n p['template_list'] = deserialize(market, paths[:], p['pid']+'_template.json')\n return products\n\n\n@socketio.on('reserve_title', namespace='/markets')\ndef reserve_title(title, product, market):\n mutex = current_app.data.reserve_title_mutex\n with mutex:\n result = {}\n\n reserved_titles = deserialize(market, [], 'reserved_titles.json', True)\n if reserved_titles is None:\n reserved_titles = {}\n\n if title in reserved_titles:\n result['success'] = False\n result['product'] = reserved_titles[title]\n else:\n reserved_titles[title] = product\n result['success'] = True\n serialize(reserved_titles, market, [], 'reserved_titles.json')\n socketio.emit('title_reserved', {'title': title, 'product': product, 'market': market}, namespace='/markets', broadcast=True, include_self=True)\n\n return result\n\n\n@socketio.on('is_title_reserved', namespace='/markets')\ndef is_title_reserved(title, market):\n\n mutex = current_app.data.reserve_title_mutex\n with mutex:\n result = {}\n\n reserved_titles = deserialize(market, [], 'reserved_titles.json', True)\n if reserved_titles is None:\n reserved_titles = {}\n\n if title in reserved_titles:\n result['success'] = False\n result['product'] = reserved_titles[title]\n else:\n # reserved_titles[title] = product\n result['success'] = True\n # serialize(reserved_titles, market, [], 'reserved_titles.json')\n\n return result\n\n\n@socketio.on('update_products', namespace='/markets')\ndef update_products(objects):\n alibaba = current_app.data.alibaba\n alibaba.room = request.sid\n socketio.start_background_task(background_update_products, alibaba, objects)\n\ndef background_update_products(alibaba, objects):\n for obj in objects:\n alibaba.update_product(obj)\n\n@socketio.on('crawl_keywords', namespace='/markets')\ndef crawl_keywords(keyword, website, page_quantity, market):\n socketio.start_background_task(background_crawling_keywords, keyword, 
website, page_quantity, request.sid, socketio, market)\n\ndef background_crawling_keywords(keyword, website, page_quantity, sid, socketio, market):\n filename = 'keywords.json'\n root = market['directory'] + '_config'\n\n msg = {'type': \"primary\", 'content': \"打开浏览器 ... ...\"}\n socketio.emit('notify', msg, namespace='/markets', room=sid)\n\n chrome_options = webdriver.ChromeOptions()\n # chrome_options_headless.add_argument('--headless')\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--disable-logging')\n chrome_options.add_argument('--ignore-certificate-errors')\n browser = webdriver.Chrome(chrome_options=chrome_options)\n\n if website == 'alibaba':\n crawler_name = re.sub(' ', '_', keyword) + ' - ' + str(page_quantity) + '页 - 阿里'\n crawler = KwclrAlibaba(browser, keyword, page_quantity, sid, socketio)\n elif website == 'alibaba_sp':\n supplier = re.search('https:\/\/([^\.]+)', keyword).group(1)\n category = 'all_products'\n if 'productgrouplist' in keyword:\n category = re.search('\/([^\/]+.html)', keyword).group(1)\n crawler_name = supplier + ' - ' + category + ' - ' + str(page_quantity) + '页 - 阿里(商家)'\n crawler = KwclrAliSp(browser, keyword, page_quantity, sid, socketio)\n elif website == 'alibaba_sr':\n crawler_name = re.sub(' ', '_', keyword) + ' - ' + str(page_quantity) + '页 - 阿里(橱窗)'\n crawler = KwclrAliSr(browser, keyword, page_quantity, sid, socketio)\n elif website == 'amazon':\n crawler_name = re.sub(' ', '_', keyword) + ' - ' + str(page_quantity) + '页 - Amazon'\n crawler = KwclrAmazon(browser, keyword, page_quantity, sid, socketio)\n\n msg = {'type': 'primary', 'content': \"开始爬取 ... ...\"}\n socketio.emit('notify', msg, namespace='/markets', room=sid)\n\n result = crawler.start()\n\n msg = {'type': \"primary\", 'content': \"爬取结束,关闭浏览器 ... ...\"}\n socketio.emit('notify', msg, namespace='/markets', room=sid)\n browser.quit()\n\n msg = {'type': \"primary\", 'content': \"保存结果 ... 
...\"}\n socketio.emit('notify', msg, namespace='/markets', room=sid)\n obj = JSON.deserialize(root, [], filename)\n if not obj:\n obj = {}\n obj[crawler_name] = result\n JSON.serialize(obj, root, [], filename)\n\n socketio.emit('keyword_crawling_result', {'key': crawler_name, 'result': result}, namespace='/markets', room=sid)\n browser.quit()\n\n@socketio.on('refresh_p4p_keywords', namespace='/markets')\ndef refresh_p4p_keywords(market):\n socketio.start_background_task(background_task_refresh_p4p_keywords, market, socketio, '/markets', request.sid)\n\ndef background_task_refresh_p4p_keywords(market, socketio, ns, room):\n p4p = get_p4p(market, socketio, room)\n keywords = p4p.crawl_keywords()\n socketio.emit('refresh_p4p_keywords_result', keywords, namespace=ns, room=room)\n", "sub_path": "app/main/events.py", "file_name": "events.py", "file_ext": "py", "file_size_in_byte": 17987, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.request.sid", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "libs.CeleryTasks.tasks.crawl_product_ranking.apply_async", "line_number": 44, "usage_type": "call"}, {"api_name": "libs.CeleryTasks.tasks.crawl_product_ranking", "line_number": 44, "usage_type": "attribute"}, {"api_name": "libs.CeleryTasks.tasks", "line_number": 44, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 92, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 92, "usage_type": "name"}, {"api_name": "pendulum.parse", "line_number": 100, "usage_type": "call"}, {"api_name": "pendulum.parse", "line_number": 102, "usage_type": "call"}, {"api_name": "pendulum.now", "line_number": 104, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 108, "usage_type": "call"}, {"api_name": "pendulum.parse", "line_number": 115, "usage_type": "call"}, {"api_name": "re.search", "line_number": 115, "usage_type": "call"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 117, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 117, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 124, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 135, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 135, "usage_type": "name"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 145, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 145, "usage_type": "name"}, {"api_name": "libs.json.JSON.serialize", "line_number": 149, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 149, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 154, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 156, 
"usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 156, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask.current_app.data", "line_number": 161, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 161, "usage_type": "name"}, {"api_name": "libs.json.JSON.serialize", "line_number": 165, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 165, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.request.sid", "line_number": 169, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 169, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.request.sid", "line_number": 173, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 173, "usage_type": "name"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 178, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 178, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 206, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 206, "usage_type": "name"}, {"api_name": "libs.json.JSON.serialize", "line_number": 209, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 209, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request.sid", "line_number": 213, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 213, "usage_type": "name"}, {"api_name": "libs.json.JSON.serialize", "line_number": 229, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 229, "usage_type": "name"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 238, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 238, "usage_type": "name"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 242, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 242, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "os.listdir", 
"line_number": 259, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 266, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path", "line_number": 274, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 275, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "attribute"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 288, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 288, "usage_type": "name"}, {"api_name": "flask.current_app.data", "line_number": 295, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 295, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 297, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 297, "usage_type": "name"}, {"api_name": "libs.alibaba.alibaba.Alibaba", "line_number": 298, "usage_type": "call"}, {"api_name": "flask.current_app.data", "line_number": 299, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 299, "usage_type": "name"}, {"api_name": "flask.current_app.data", "line_number": 305, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 305, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 306, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 306, "usage_type": "name"}, {"api_name": "time.time", "line_number": 311, "usage_type": "call"}, {"api_name": "time.time", "line_number": 336, "usage_type": "call"}, {"api_name": "flask.current_app.data", "line_number": 351, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 351, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 352, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 352, "usage_type": "name"}, {"api_name": "flask.current_app.data", "line_number": 358, "usage_type": "attribute"}, {"api_name": "flask.current_app", 
"line_number": 358, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 359, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 359, "usage_type": "name"}, {"api_name": "flask.current_app.data", "line_number": 375, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 375, "usage_type": "name"}, {"api_name": "flask.current_app.data", "line_number": 398, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 398, "usage_type": "name"}, {"api_name": "flask.current_app.data", "line_number": 419, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 419, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 420, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 420, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 429, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 429, "usage_type": "name"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 438, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 438, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 444, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 444, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 447, "usage_type": "call"}, {"api_name": "libs.crawlers.keywords_crawler_alibaba.KwclrAlibaba", "line_number": 448, "usage_type": "call"}, {"api_name": "re.search", "line_number": 450, "usage_type": "call"}, {"api_name": "re.search", "line_number": 453, "usage_type": "call"}, {"api_name": "libs.crawlers.keywords_crawler_ali_sp.KwclrAliSp", "line_number": 455, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 457, "usage_type": "call"}, {"api_name": "libs.crawlers.keywords_crawler_ali_sr.KwclrAliSr", "line_number": 458, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 460, "usage_type": "call"}, {"api_name": "libs.crawlers.keywords_crawler_amazon.KwclrAmazon", "line_number": 461, "usage_type": "call"}, {"api_name": "libs.json.JSON.deserialize", "line_number": 474, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 474, "usage_type": "name"}, {"api_name": "libs.json.JSON.serialize", "line_number": 478, "usage_type": "call"}, {"api_name": "libs.json.JSON", "line_number": 478, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 485, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 485, "usage_type": "name"}]} +{"seq_id": "376508194", "text": "import title\r\nimport lists\r\nimport stats\r\nimport os.path\r\nimport os\r\nimport base64\r\ndef save():\r\n savenum = 1\r\n newsave = input(\"Would you like to save in a new file (yes)/Save in the current file? 
(no): \")\r\n statfile = open(\"stats.py\")\r\n statread = statfile.read()\r\n if os.path.exists(\"saves\") != True:\r\n os.mkdir(\"saves\")\r\n if newsave in lists.yes:\r\n for i in range(1, 1000):\r\n if os.path.exists(\"saves\\save\"+str(i)+\".sav\") != True:\r\n savenum = i\r\n break\r\n stats.currentsave = savenum\r\n openedsave = open(\"saves\\save\"+str(savenum)+\".sav\", \"wb\")\r\n openedsave.write(base64.b64encode(bytes(statread, \"utf-8\")))\r\n print(\"Game saved in save\"+str(savenum)+\".sav\")\r\n openedsave.close()\r\n elif newsave in lists.no:\r\n openedsave = open(\"saves\\save\"+str(stats.currentsave)+\".sav\", \"wb\")\r\n openedsave.write(base64.b64encode(bytes(statread, \"utf-8\")))\r\n print(\"Game saved in save\"+str(stats.currentsave)+\".sav\")\r\n openedsave.close()\r\n elif newsave in lists.quit:\r\n print(\"Game not saved.\")\r\n statfile.close()\r\n return False\r\n statfile.close()\r\n return True\r\ndef load(savenum, destination = \"stats.py\", silent = False):\r\n #If silent is True then print \"loaded\"\r\n statfile = open(destination, \"w\")\r\n if os.path.exists(\"saves\\save\"+str(savenum)+\".sav\"):\r\n openedsave = open(\"saves\\save\"+str(savenum)+\".sav\")\r\n openedread = openedsave.read()\r\n statfile.write(base64.b64decode(openedread).decode())\r\n if silent == False:\r\n print(\"Save loaded.\")\r\n openedsave.close()\r\n statfile.close()\r\n if destination == \"stats.py\":\r\n reread = open(\"stats.py\")\r\n editcursave = reread.readlines()\r\n editcursave[8] = \"currentsave = \"+str(savenum)+\"\\n\"\r\n rewrite = open(\"stats.py\", \"w\")\r\n for i in editcursave:\r\n rewrite.write(i)\r\n reread.close()\r\n rewrite.close()\r\n else:\r\n print(\"That save file does not exist!\")\r\n statfile.close()\r\n return False\r\n return True\r\ndef saveDelete(saveNum):\r\n if os.path.exists(\"saves\\save\"+str(saveNum)+\".sav\"):\r\n os.remove(\"saves\\save\"+str(saveNum)+\".sav\")\r\n print(\"Save deleted.\")\r\n else:\r\n print(\"That save file does not exist!\")\r\n return False\r\n return True\r\ndef listSaves():\r\n print(\"Your saves:\")\r\n for i in range(1, 1000):\r\n if os.path.exists(\"saves\\save\"+str(i)+\".sav\"):\r\n print(\"Save \"+str(i)+\":\")\r\n load(i, os.path.splitext(\"saves\\save\"+str(i)+\".sav\")[0]+\".py\", True)\r\n gotopy = open(\"goto.py\", \"w\")\r\n gotopy.write(\"import saves.save\"+str(i)+\"\\ndef main():\\n\\tprint(saves.save\"+str(i)+\".name+', Level '+str(saves.save\"+str(i)+\".level))\")\r\n gotopy.close()\r\n import goto\r\n goto.main()\r\n os.remove(os.path.splitext(\"saves\\save\"+str(i)+\".sav\")[0]+\".py\")\r\n else:\r\n if i == 1:\r\n print(\"You have no saves!\")\r\n return False\r\n break", "sub_path": "save.py", "file_name": "save.py", "file_ext": "py", "file_size_in_byte": 3185, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 13, "usage_type": "call"}, {"api_name": "lists.yes", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "stats.currentsave", "line_number": 19, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 21, "usage_type": "call"}, {"api_name": "lists.no", "line_number": 24, 
"usage_type": "attribute"}, {"api_name": "stats.currentsave", "line_number": 25, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 26, "usage_type": "call"}, {"api_name": "stats.currentsave", "line_number": 27, "usage_type": "attribute"}, {"api_name": "lists.quit", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "goto.main", "line_number": 78, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}]} +{"seq_id": "252541527", "text": "from collections import OrderedDict\n\nimport joblib\n\nimport luigi\nfrom pandda_2.luigi_sge import SGEJobTask\n\nfrom libtbx import easy_mp\n\n\n# class Processor:\n# def __init__(self):\n# pass\n#\n# def __call__(self,\n# func,\n# args_list,\n# ):\n#\n# results = []\n# for args in args_list:\n# result = func(*args)\n# results.append(result)\n#\n# return results\n#\n# def repr(self):\n# repr = OrderedDict()\n# return repr\n\nclass Processor:\n def __init__(self):\n pass\n\n def __call__(self,\n funcs,\n output_paths=None,\n result_loader=None,\n shared_tmp_dir=None,\n ):\n results = []\n for func in funcs:\n result = func()\n results.append(result)\n\n return results\n\n def repr(self):\n repr = OrderedDict()\n return repr\n\n\nclass Task(SGEJobTask):\n func = luigi.Parameter()\n output_path = luigi.Parameter()\n\n def work(self):\n self.func()\n\n def output(self):\n return luigi.LocalTarget(str(self.output_path))\n\n\nclass ProcessorLuigi:\n\n def __init__(self,\n jobs=10,\n parallel_env=\"smp\",\n n_cpu=12,\n run_locally=False,\n h_vmem=100,\n m_mem_free=5,\n ):\n self.jobs = jobs\n self.parallel_env = parallel_env\n self.n_cpu = n_cpu\n self.run_locally = run_locally\n self.h_vmem = h_vmem\n self.m_mem_free = m_mem_free\n\n def __call__(self,\n funcs,\n output_paths=None,\n result_loader=None,\n shared_tmp_dir=None,\n ):\n tasks = [Task(func=func,\n output_path=output_path,\n shared_tmp_dir=\"/dls/science/groups/i04-1/conor_dev/pandda/lib-python/pandda/pandda_analyse_dask/luigi_test\",\n parallel_env=self.parallel_env,\n n_cpu=self.n_cpu,\n run_locally=False,\n h_vmem=self.h_vmem,\n m_mem_free=self.m_mem_free,\n )\n for func, output_path\n in zip(funcs, output_paths)\n ]\n\n luigi.build(tasks,\n local_scheduler=True,\n workers=self.jobs,\n detailed_summary=False,\n )\n\n if result_loader:\n results = [result_loader(output_path)\n for output_path\n in output_paths\n ]\n\n else:\n results = []\n\n return results\n\n def repr(self):\n repr = OrderedDict()\n repr[\"jobs\"] = self.jobs\n return repr\n\n\nclass ProcessorDict:\n def __init__(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, 
exc_tb):\n pass\n\n def __call__(self,\n funcs,\n ):\n results = {}\n for key, func in funcs.items():\n result = func()\n results[key] = result\n\n return results\n\n def repr(self):\n repr = OrderedDict()\n return repr\n\n\nclass ProcessorDictJoblib:\n def __init__(self,\n cpus=21,\n verbosity=8,\n ):\n self.cpus = cpus\n self.verbosity = verbosity\n self.parallel = None\n\n def __enter__(self):\n self.parallel = joblib.Parallel(n_jobs=self.cpus,\n verbose=self.verbosity,\n ).__enter__()\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.parallel.__exit__(exc_type, exc_val, exc_tb)\n\n def __call__(self,\n funcs,\n ):\n\n if hasattr(funcs, \"keys\"):\n\n keys = funcs.keys()\n values = [funcs[key] for key in keys]\n\n processed = self.parallel(joblib.delayed(value)()\n for value\n in values\n )\n\n results = {key: processed[i]\n for i, key\n in enumerate(keys)\n }\n else:\n results = self.parallel(joblib.delayed(func)()\n for func\n in funcs\n )\n\n return results\n\n def repr(self):\n repr = OrderedDict()\n repr[\"cpus\"] = self.cpus\n repr[\"verbosity\"] = self.verbosity\n return repr\n\n\nclass ProcessorJoblib:\n def __init__(self,\n cpus=21,\n verbosity=8,\n ):\n self.cpus = cpus\n self.verbosity = verbosity\n\n def __call__(self, funcs):\n results = joblib.Parallel(n_jobs=self.cpus,\n verbose=self.verbosity,\n )(joblib.delayed(func)()\n for func\n in funcs\n )\n\n return results\n\n\ndef wrap_call(f):\n return f()\n\n\nclass ProcessorDictEasyMP:\n def __init__(self,\n cpus=21,\n verbosity=8,\n ):\n self.cpus = cpus\n self.verbosity = verbosity\n\n def __call__(self,\n funcs,\n ):\n keys = funcs.keys()\n values = [funcs[key] for key in keys]\n\n results = easy_mp.pool_map(fixed_func=wrap_call,\n args=values,\n processes=int(self.cpus),\n )\n\n results_dict = {key: results[i]\n for i, key\n in enumerate(keys)\n }\n\n return results_dict\n\n def repr(self):\n repr = OrderedDict()\n repr[\"cpus\"] = self.cpus\n repr[\"verbosity\"] = self.verbosity\n return repr\n", "sub_path": "pandda_2/processor.py", "file_name": "processor.py", "file_ext": "py", "file_size_in_byte": 6218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.OrderedDict", "line_number": 49, "usage_type": "call"}, {"api_name": "pandda_2.luigi_sge.SGEJobTask", "line_number": 53, "usage_type": "name"}, {"api_name": "luigi.Parameter", "line_number": 54, "usage_type": "call"}, {"api_name": "luigi.Parameter", "line_number": 55, "usage_type": "call"}, {"api_name": "luigi.LocalTarget", "line_number": 61, "usage_type": "call"}, {"api_name": "luigi.build", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 118, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 144, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 158, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 176, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 194, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 209, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 211, "usage_type": "call"}, {"api_name": "libtbx.easy_mp.pool_map", "line_number": 237, "usage_type": "call"}, {"api_name": "libtbx.easy_mp", "line_number": 237, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "229052274", "text": "from PigGame.INTERFACE.iPlayer import 
action\nfrom random import randint\nfrom PigGame.COMMON.Logs import logPrint\nimport json,os,sys,locale\n\n\n###choosing the language\nlanguage=locale.setlocale(locale.LC_ALL, '')\nlanguage=language[0:2]\n###choosing the json for selected language\npath=\"../DTO/res_\"+language+\".json\"\n####setting the path of json file\nabsfilepath=os.path.abspath(__file__)\nfileDir = os.path.dirname(absfilepath)\nfilename = os.path.join(fileDir, path)\nfilename = os.path.abspath(os.path.realpath(filename))\nreadJson=open(filename, \"r\")\ndata = json.load(readJson)\nlog = logPrint()\n\n\nclass human(action):\n\n @property\n def Name(self):\n return self.__name\n @Name.setter\n def Name(self,name=None):\n if(name==None):\n self.__name=self.declareHuman()\n else:\n self.__name=name\n log.printLog(data[\"i\"], data[\"take name\"] + self.__name)\n\n @property\n def TempScore(self):\n return self.__tempScore\n @TempScore.setter\n def TempScore(self,tempscore):\n self.__tempScore=tempscore\n\n @property\n def FinalScore(self):\n return self.__finalScore\n\n @FinalScore.setter\n def FinalScore(self, finalScore):\n self.__finalScore = finalScore\n\n def declareHuman(self):\n self.__name= input(data[\"take name\"])\n log.printLog(data[\"i\"],data[\"take name\"]+self.__name)\n return self.__name\n\n def choice(self,playername):\n self.__choicevalue = input(playername+' '+data[\"roll or hold\"])\n log.printLog(data[\"i\"], data[\"roll or hold\"] + self.__choicevalue)\n if(self.__choicevalue.lower() in ('r','h')):\n return self.__choicevalue\n else:\n print(\"\\n\"+data[\"wrong roll hold\"])\n log.printLog(data[\"w\"],data[\"wrong roll hold\"])\n return 'h'\n\n\n def role(self):\n self.__x= randint(0,19)\n if (self.__x==0 ):\n print(data[\"human scored\"],self.__x)\n log.printLog(data[\"i\"], data[\"human scored\"]+str(self.__x))\n print(data[\"human chance over\"])\n log.printLog(data[\"i\"],data[\"human chance over\"])\n return False\n\n elif(self.__x>6):\n self.__x=self.__x%6\n print(data[\"human scored\"], self.__x)\n log.printLog(data[\"i\"], data[\"human scored\"]+str(self.__x))\n return self.__x\n\n else:\n print(data[\"human scored\"], self.__x)\n log.printLog(data[\"i\"], data[\"human scored\"]+str(self.__x))\n return self.__x\n\n def hold(self):\n print(data[\"human hold chance\"])\n log.printLog(data[\"i\"],data[\"human hold chance\"])\n return 1\n\n", "sub_path": "COMMON/humanPlayer.py", "file_name": "humanPlayer.py", "file_ext": "py", "file_size_in_byte": 2665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "locale.setlocale", "line_number": 8, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 16, "usage_type": "call"}, {"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "PigGame.COMMON.Logs.logPrint", "line_number": 19, "usage_type": 
"call"}, {"api_name": "PigGame.INTERFACE.iPlayer.action", "line_number": 22, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "435225342", "text": "import json\nimport os\nimport traceback\nfrom multiprocessing.dummy import Pool\n\nimport bottle\nimport requests\nfrom beaker.middleware import SessionMiddleware\nfrom bottle import request, post, get, HTTPError, HTTPResponse, hook\nfrom cork import Cork\n\nimport eu.softfire.tub.exceptions.exceptions as exceptions\nfrom eu.softfire.tub.core import CoreManagers\nfrom eu.softfire.tub.core.CoreManagers import get_resources_dict, Experiment, \\\n get_experiment_dict, add_resource, get_other_resources\nfrom eu.softfire.tub.core.calendar import CalendarManager\nfrom eu.softfire.tub.core.certificate import CertificateGenerator, log_certificate_create\nfrom eu.softfire.tub.utils.static_config import CONFIGURATION_FOLDER\nfrom eu.softfire.tub.utils.utils import get_config, get_logger\n\nlogger = get_logger('eu.softfire.tub.api')\nbottle.TEMPLATE_PATH = [get_config('api', 'view-path', '/etc/softfire/views')]\naaa = Cork(get_config(\"api\", \"cork-files-path\", \"/etc/softfire/users\"))\nauthorize = aaa.make_auth_decorator(fail_redirect=\"/login\")\ncreate_user_thread = None\ncreate_user_thread_pool = Pool(20)\n\n\n@hook('after_request')\ndef maintenance():\n try:\n if request.environ.get('bottle.raw_path') == '/login' or aaa.current_user.role == 'admin':\n return\n except:\n return\n if CoreManagers.maintenance:\n raise HTTPResponse(\"Under maintenance. Please try again in a few minutes.\")\n\n######################\n# Experimenters urls #\n######################\n\n@post('/provide_resources')\n@authorize(role='experimenter')\ndef provide_resources():\n experiment_id = post_get('experiment_id')\n CoreManagers.provide_resources(aaa.current_user.username, experiment_id)\n bottle.redirect('/experimenter')\n\n\n@post('/release_resources')\n@authorize(role='experimenter')\ndef delete_resources():\n experiment_id = post_get('experiment_id')\n CoreManagers.release_resources(aaa.current_user.username, experiment_id)\n bottle.redirect('/experimenter')\n\n\n@get('/refresh_images')\n@authorize(role='experimenter')\ndef refresh_resources():\n CoreManagers.refresh_resources(aaa.current_user.username)\n CoreManagers.list_resources()\n\n\n@get('/get_full_status')\n@authorize(role='experimenter')\ndef get_full_status():\n _, _, experiment_dict = CoreManagers.get_experiment_dict(aaa.current_user.username)\n # convert string values that represent json into dictionaries so that they are displayed nicely\n for e in experiment_dict:\n try:\n parsed_value = json.loads(e.get('value'))\n e['value'] = parsed_value\n except:\n pass\n bottle.response.headers['Content-Type'] = 'application/json'\n return json.dumps(experiment_dict, indent=4, sort_keys=True)\n\n\n@get('/get_status')\n@authorize(role='experimenter')\ndef get_status():\n _, _, experiment_dict = CoreManagers.get_experiment_dict(aaa.current_user.username)\n experiment_dict = __format_experiment_dict(experiment_dict)\n bottle.response.headers['Content-Type'] = 'application/json'\n return json.dumps(experiment_dict)\n\n\n@post('/check_user')\n@authorize(role='portal')\ndef get_status():\n username = post_get('username')\n user = aaa.user(username)\n bottle.response.headers['Content-Type'] = 'application/json'\n if user:\n return HTTPResponse(body=json.dumps(dict(msg=\"User %s exists\" % username, ok=True)), status=200)\n else:\n return HTTPResponse(body=\"User 
%s does not exist\" % username, status=404)\n\n\n@post('/reserve_resources')\n@authorize(role='experimenter')\ndef book_resources():\n data = request.files.get('data')\n logger.debug(\"files: %s\" % list(request.files.keys()))\n for file in request.files:\n logger.debug(\"file %s\" % file)\n logger.debug(\"Data: '%s'\" % data)\n # logger.debug(\"Data.file: %s\" % data.file)\n if data and data.file:\n Experiment(data.file, username=aaa.current_user.username).reserve()\n bottle.redirect('/experimenter')\n logger.debug((\"got body: %s\" % request.body.read()))\n raise FileNotFoundError(\"File not found in your request\")\n\n\n@post('/add_resource')\n@authorize(role='experimenter')\ndef create_resource():\n resource_id = request.forms.get('id')\n node_type = request.forms.get('node_type')\n cardinality = request.forms.get('cardinality')\n description = request.forms.get('description')\n testbed = request.forms.get('testbed')\n upload = request.files.get('upload')\n add_resource(aaa.current_user.username, resource_id, node_type, cardinality, description, testbed, upload)\n bottle.redirect('/experimenter')\n\n\n#################\n# General pages #\n#################\n\n@bottle.post('/login')\ndef login():\n \"\"\"Authenticate users\"\"\"\n username = post_get('username')\n password = post_get('password')\n if not aaa.login(username, password):\n return dict(ok=False, msg=\"Username or password invalid\")\n if aaa.current_user.role == 'admin':\n return dict(\n ok=True,\n redirect=\"/admin\"\n )\n else:\n return dict(\n ok=True,\n redirect=\"/experimenter\"\n )\n\n\n@bottle.route('/logout')\ndef logout():\n aaa.logout(success_redirect='/login')\n\n\n@bottle.post('/register')\ndef register():\n \"\"\"Send out registration email\"\"\"\n logger.debug((\"got body: %s\" % request.body.read().decode(\"utf-8\")))\n if check_if_authorized(post_get('username')):\n aaa.create_user(post_get('username'), 'user', post_get('password'))\n else:\n return dict(\n ok=False,\n msg=\"username not pre-authorized\"\n )\n return 'User created'\n\n\n# @bottle.post('/reset_password')\n# def send_password_reset_email():\n# \"\"\"Send out password reset email\"\"\"\n# aaa.send_password_reset_email(\n# username=post_get('username'),\n# email_addr=post_get('email_address')\n# )\n# return 'Please check your mailbox.'\n\n\n# @bottle.post('/change_password')\n# def change_password():\n# \"\"\"Change password\"\"\"\n# aaa.reset_password(post_get('reset_code'), post_get('password'))\n# return 'Thanks. Go to login'\n\n\n@bottle.route('/')\n@authorize()\ndef index():\n \"\"\"Only authenticated users can see this\"\"\"\n return 'Welcome! <br/>' \\
'<a href=\"/admin\">Admin page</a><br/>' \\\n '<a href=\"/experimenter\">Experimenter</a><br/>' \\\n '<a href=\"/logout\">Logout</a><br/>
'\n\n\n@bottle.route('/my_role')\ndef show_current_user_role():\n \"\"\"Show current user role\"\"\"\n session = bottle.request.environ.get('beaker.session')\n logger.debug(\"Session from simple_webapp\", repr(session))\n aaa.require(fail_redirect='/login')\n return aaa.current_user.role\n\n\n###############\n# Admin pages #\n###############\n\n@bottle.post('/add-authorized-experimenter')\n@authorize(role='admin')\ndef register():\n \"\"\"Send out registration email\"\"\"\n logger.debug((\"got body: %s\" % request.body.read().decode(\"utf-8\")))\n add_authorized_experimenter(post_get('username'))\n return 'User created'\n\n\n@bottle.post('/certificates')\n# @authorize(role='admin')\n@authorize(role='portal')\ndef get_certificate():\n username = post_get('username')\n if not username:\n raise bottle.HTTPError(500, \"Username missing\")\n password = post_get('password', default=None)\n days = int(post_get('days', default=None))\n cert_gen = CertificateGenerator()\n log_certificate_create(username, days)\n cert_gen.generate(password, username, days)\n openvpn_config = cert_gen.get_openvpn_config()\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8',\n 'Content-Disposition': 'attachment; filename=\"softfire-vpn_%s.ovpn\"' % username,\n \"Content-Length\": len(openvpn_config)\n }\n return bottle.HTTPResponse(openvpn_config, 200, **headers)\n\n\n@bottle.post('/create_user')\n@authorize(role='portal')\ndef create_user():\n password = postd().password\n role = postd().role\n username = postd().username\n create_user_thread_pool.apply_async(create_user_thread_function, args=(username, password, role,))\n return HTTPResponse(\"Creating user %s in progress\" % username, status=202)\n\n\n@bottle.post('/refresh_user')\n@authorize(role='admin')\ndef refresh_user():\n global create_user_thread\n username = postd().username\n CoreManagers.refresh_user(username)\n return HTTPResponse(\"Refreshed user %s.\" % username, status=200)\n\n\n@bottle.post('/delete_user')\n@authorize(role='portal')\ndef delete_user():\n username = post_get('username')\n try:\n CoreManagers.delete_user(username)\n aaa.delete_user(username)\n return dict(ok=True, msg='Deleted {}'.format(username))\n except Exception as e:\n error_message = 'Deletion of user {} failed: {}'.format(username, str(e))\n logger.error(error_message)\n traceback.print_exc()\n bottle.response.status = 400\n return dict(ok=False, msg=error_message)\n\n\n@bottle.post('/create_role')\n@authorize(role='admin')\ndef create_role():\n aaa.create_role(post_get('role'), int(post_get('level')))\n return dict(ok=True, msg='')\n\n\n@bottle.post('/delete_role')\n@authorize(role='admin')\ndef delete_role():\n aaa.delete_role(post_get('role'))\n return dict(ok=True, msg='')\n\n\n@get('/get_resources')\n@authorize(role='admin')\ndef get_status():\n resources = CoreManagers.get_all_resources()\n bottle.response.headers['Content-Type'] = 'application/json'\n return json.dumps(resources)\n\n\n@get('/experimenters')\n@authorize(role='admin')\ndef get_status():\n bottle.response.headers['Content-Type'] = 'application/json'\n return json.dumps(CoreManagers.list_experimenters())\n\n@bottle.post('/enable_maintenance')\n@authorize(role='admin')\ndef enable_maintenance():\n \"\"\"Enable maintenance mode so that experimenters cannot execute actions anymore.\"\"\"\n CoreManagers.maintenance = True\n bottle.response.status = 200\n return dict(ok=False, msg='Maintenance enabled')\n\n@bottle.post('/disable_maintenance')\n@authorize(role='admin')\ndef disable_maintenance():\n 
\"\"\"Disable maintenance mode.\"\"\"\n CoreManagers.maintenance = False\n bottle.response.status = 200\n return dict(ok=False, msg='Maintenance disabled')\n\n\n################\n# Static pages #\n################\n\n\n@bottle.route('/admin')\n@authorize(role=\"admin\", fail_redirect='/sorry_page')\n@bottle.view('admin_page')\ndef admin():\n \"\"\"Only admin users can see this\"\"\"\n # aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles(),\n managers=CoreManagers.list_managers(),\n experimenters=CoreManagers.list_experimenters(),\n )\n\n\n@bottle.route('/login')\n@bottle.view('login_form')\ndef login_form():\n \"\"\"Serve login form\"\"\"\n return {}\n\n\n@bottle.route('/experimenter')\n@bottle.view('experimenter')\n@authorize(role=\"experimenter\", fail_redirect='/sorry_page')\ndef experimenter_form():\n \"\"\"Serve experimenter form\"\"\"\n images, networks, flavours = get_other_resources()\n exp_names, exp_ids, experiment_dict = get_experiment_dict(aaa.current_user.username)\n experiment_dict = __format_experiment_dict(experiment_dict)\n return dict(\n current_user=aaa.current_user,\n resources=get_resources_dict(),\n user_resources=get_resources_dict(aaa.current_user.username),\n images=images,\n networks=networks,\n flavours=flavours,\n experiment_resources=experiment_dict,\n ids=exp_ids,\n )\n\n\n@bottle.route('/calendar')\n@bottle.view('calendar')\n@authorize(role=\"experimenter\", fail_redirect='/sorry_page')\ndef get_calendar():\n return dict(\n calendar=CalendarManager.get_month(),\n current_user=aaa.current_user\n )\n\n\n@bottle.route('/sorry_page')\ndef sorry_page():\n \"\"\"Serve sorry page\"\"\"\n return '

<p>Sorry, you are not authorized to perform this action</p>

'\n\n\n@bottle.route('/static/<filename:path>')\n# @authorize(role=\"experimenter\", fail_redirect='/sorry_page')\ndef server_static(filename):\n \"\"\"Route to the css and static files\"\"\"\n if \"..\" in filename:\n return HTTPError(status=403)\n return bottle.static_file(filename, root='%s/static' % get_config('api', 'view-path', '/etc/softfire/views'))\n\n\n#########\n# Utils #\n#########\n\n@bottle.route('/heartbeat')\ndef heartbeat():\n return 'OK'\n\n\ndef error_translation(func):\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except ValueError as e:\n traceback.print_exc()\n return HTTPResponse(status=400, body=e.args)\n # bottle.abort(400, e.args)\n except exceptions.ExperimentNotFound as e:\n traceback.print_exc()\n return HTTPResponse(status=404, body=e.message)\n # bottle.abort(404, e.message)\n except exceptions.ExperimentValidationError as e:\n traceback.print_exc()\n return HTTPResponse(status=400, body=e.message)\n # bottle.abort(400, e.message)\n except exceptions.ManagerNotFound as e:\n traceback.print_exc()\n return HTTPResponse(status=404, body=e.message)\n # bottle.abort(404, e.message)\n except exceptions.ResourceAlreadyBooked as e:\n traceback.print_exc()\n return HTTPResponse(status=400, body=e.message)\n # bottle.abort(400, e.message)\n except exceptions.ResourceNotFound as e:\n traceback.print_exc()\n return HTTPResponse(status=404, body=e.message)\n # bottle.abort(404, e.message)\n except exceptions.RpcFailedCall:\n traceback.print_exc()\n return HTTPResponse(status=500,\n body=\"Oops, an internal error occurred, please report to us the procedure and we will fix it\")\n # bottle.abort(500, \"Oops, an internal error occurred, please report to us the procedure and we will fix it\")\n except FileNotFoundError as e:\n traceback.print_exc()\n return HTTPResponse(status=404, body=e.args)\n # bottle.abort(404, \"File not found in your request\")\n\n return wrapper\n\n\ndef postd():\n return bottle.request.forms\n\n\ndef post_get(name, default=''):\n try:\n return json.loads(request.body.read().decode(\"utf-8\")).get(name, default)\n except:\n return bottle.request.POST.get(name, default).strip()\n\n\ndef check_if_authorized(username):\n authorized_experimenter_file = get_config('api', 'authorized-experimenters',\n '/etc/softfire/authorized-experimenters.json')\n if os.path.exists(authorized_experimenter_file) and os.path.isfile(authorized_experimenter_file):\n with open(authorized_experimenter_file, \"r\") as f:\n authorized_exp = json.loads(f.read().encode(\"utf-8\"))\n return authorized_exp.get(username) and bool(authorized_exp[username])\n else:\n return False\n\n\ndef add_authorized_experimenter(username):\n if not os.path.exists(CONFIGURATION_FOLDER):\n os.makedirs(CONFIGURATION_FOLDER)\n authorized_experimenter_file = get_config('api', 'authorized-experimenters',\n '/etc/softfire/authorized-experimenters.json')\n authorized_exp = {}\n if os.path.exists(authorized_experimenter_file):\n with open(authorized_experimenter_file, 'r') as f:\n authorized_exp = json.loads(f.read())\n authorized_exp[username] = True\n with open(authorized_experimenter_file, 'w') as f:\n f.write(json.dumps(authorized_exp))\n\n\ndef __format_experiment_dict(experiment):\n \"\"\"\n Format the experiment value. 
In case an NSR is in the experiment value, only it's most important fields are\n kept, so that the user is not confused by the amount of information.\n :param experiment:\n :return:\n \"\"\"\n formatted_experiment = []\n for resource in experiment:\n if resource.get('node_type') == 'NfvResource' and resource.get('status') == 'DEPLOYED':\n full_nsr = resource.get('value')\n try:\n full_nsr = json.loads(full_nsr)\n except:\n logger.warning('Could not parse NSR of resource: {}'.format(resource.get('resource_id')))\n formatted_experiment.append(resource)\n continue\n\n formatted_nsr = {}\n\n for key in ['name', 'version', 'status']:\n value = full_nsr.get(key)\n if value is not None:\n formatted_nsr[key] = value\n\n vnfr_list = full_nsr.get('vnfr')\n if vnfr_list is not None and isinstance(vnfr_list, list):\n formatted_vnfr_list = []\n for vnfr in vnfr_list:\n formatted_vnfr = {}\n for key in ['name', 'type', 'status']:\n value = vnfr.get(key)\n if value is not None:\n formatted_vnfr[key] = value\n\n # add error description if lifecycle events failed\n vnfr_status = vnfr.get('status')\n if vnfr_status is not None and vnfr_status == 'ERROR':\n for lifecycle_event in vnfr.get('lifecycle_event_history'):\n if lifecycle_event.get('event') == 'ERROR':\n if formatted_vnfr.get('failed lifecycle events') is None:\n formatted_vnfr['failed lifecycle events'] = []\n formatted_vnfr.get('failed lifecycle events').append(\n '{}: {}'.format(lifecycle_event.get('executedAt'),\n lifecycle_event.get('description')))\n\n private_ip_list = []\n floating_ip_list = []\n vdu_list = vnfr.get('vdu')\n if vdu_list is not None and isinstance(vdu_list, list):\n for vdu in vdu_list:\n vnfc_instance_list = vdu.get('vnfc_instance')\n if vnfc_instance_list is not None and isinstance(vnfc_instance_list, list):\n for vnfc_instance in vnfc_instance_list:\n if vnfc_instance.get('floatingIps') is not None and isinstance(\n vnfc_instance.get('floatingIps'), list):\n vnfc_floating_ip_list = ['{}:{}'.format(fip.get('netName'), fip.get('ip')) for\n fip in vnfc_instance.get('floatingIps')]\n floating_ip_list.extend(vnfc_floating_ip_list)\n if vnfc_instance.get('ips') is not None and isinstance(vnfc_instance.get('ips'),\n list):\n vnfc_private_ip_list = ['{}:{}'.format(fip.get('netName'), fip.get('ip')) for\n fip in vnfc_instance.get('ips')]\n private_ip_list.extend(vnfc_private_ip_list)\n formatted_vnfr['private IPs'] = private_ip_list\n formatted_vnfr['floating IPs'] = floating_ip_list\n formatted_vnfr_list.append(formatted_vnfr)\n formatted_nsr['vnfr'] = formatted_vnfr_list\n\n resource['value'] = json.dumps(formatted_nsr)\n formatted_experiment.append(resource)\n return formatted_experiment\n\n\ndef setup_app() -> (SessionMiddleware, int, bool):\n bottle.debug(True)\n p = get_config(section='api', key='port', default=5080)\n bottle.install(error_translation)\n session_opts = {\n 'session.cookie_expires': True,\n 'session.encrypt_key': get_config('api', 'encrypt_key', 'softfire'),\n 'session.httponly': True,\n 'session.timeout': 3600 * 24, # 1 day\n 'session.type': 'cookie',\n 'session.validate_key': True,\n }\n a = SessionMiddleware(bottle.app(), session_opts)\n qb = get_config('api', 'quiet', 'true').lower() == 'true'\n logger.debug(\"Bootlepy quiet mode: %s\" % qb)\n return a, p, qb\n\n\napp, port, quiet_bottle = setup_app()\n\n\ndef start_listening():\n logger.info(\"Running bottle app: quiet=%s, port=%s, host='0.0.0.0'\" % (quiet_bottle, port))\n bottle.run(app, server='paste', host='0.0.0.0', port=port, 
quiet=quiet_bottle)\n\n\ndef create_user_thread_function(username, password, role='experimenter'):\n \"\"\"\n This function is expected to be executed in a thread pool and called by the Api's create_user function.\n :param username: \n :param password: \n :param role: \n :return: \n \"\"\"\n try:\n CoreManagers.create_user(username=username, password=password, role=role)\n res = requests.post(\"http://localhost:%s/create_user_local\" % port,\n json=dict(username=username, password=password, role=role))\n if res.status_code != 200:\n logger.error('Not able to create cork user {}, return status {}: {}'.format(username, res.status_code,\n str(res.content)))\n try:\n logger.debug('Try to delete user {} for rollback after creation in cork failed'.format(username))\n CoreManagers.delete_user(username=username)\n except Exception as e:\n logger.error(\n 'Deletion of user {} for rollback after cork user creation failed did not succeed: {}'.format(\n username, e))\n\n except Exception as e:\n error_message = 'Create user \\'{}\\' failed: {}'.format(username, str(e))\n logger.error(error_message)\n traceback.print_exc()\n\n\n@post(\"/create_user_local\")\ndef _create_user_cork():\n logger.debug(\"Remote addr: %s\" % request.remote_addr)\n if \"localhost\" in request.remote_addr or \"127.0.0.1\" in request.remote_addr:\n try:\n aaa.login(username='admin', password=get_config('system', 'admin-password', 'softfire'))\n aaa.create_user(request.json.get('username'), request.json.get('role'), request.json.get('password'))\n except Exception as e:\n return HTTPResponse(status=400, body=str(e))\n return HTTPResponse(status=200)\n else:\n return HTTPResponse(status=404)\n", "sub_path": "eu/softfire/tub/api/Api.py", "file_name": "Api.py", "file_ext": "py", "file_size_in_byte": 22431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "eu.softfire.tub.utils.utils.get_logger", "line_number": 21, "usage_type": "call"}, {"api_name": "bottle.TEMPLATE_PATH", "line_number": 22, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 22, "usage_type": "call"}, {"api_name": "cork.Cork", "line_number": 23, "usage_type": "call"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 23, "usage_type": "call"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 26, "usage_type": "call"}, {"api_name": "bottle.request.environ.get", "line_number": 32, "usage_type": "call"}, {"api_name": "bottle.request.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 32, "usage_type": "name"}, {"api_name": "eu.softfire.tub.core.CoreManagers.maintenance", "line_number": 36, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 36, "usage_type": "name"}, {"api_name": "bottle.HTTPResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "bottle.hook", "line_number": 29, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.provide_resources", "line_number": 47, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 47, "usage_type": "name"}, {"api_name": "bottle.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 43, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.release_resources", "line_number": 55, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 55, 
"usage_type": "name"}, {"api_name": "bottle.redirect", "line_number": 56, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 51, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.refresh_resources", "line_number": 62, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 62, "usage_type": "name"}, {"api_name": "eu.softfire.tub.core.CoreManagers.list_resources", "line_number": 63, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 63, "usage_type": "name"}, {"api_name": "bottle.get", "line_number": 59, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.get_experiment_dict", "line_number": 69, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 69, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 77, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 78, "usage_type": "call"}, {"api_name": "bottle.get", "line_number": 66, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.get_experiment_dict", "line_number": 84, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 84, "usage_type": "name"}, {"api_name": "bottle.response", "line_number": 86, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 87, "usage_type": "call"}, {"api_name": "bottle.get", "line_number": 81, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 95, "usage_type": "attribute"}, {"api_name": "bottle.HTTPResponse", "line_number": 97, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 97, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 99, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 90, "usage_type": "call"}, {"api_name": "bottle.request.files.get", "line_number": 105, "usage_type": "call"}, {"api_name": "bottle.request.files", "line_number": 105, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 105, "usage_type": "name"}, {"api_name": "bottle.request.files.keys", "line_number": 106, "usage_type": "call"}, {"api_name": "bottle.request.files", "line_number": 106, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 106, "usage_type": "name"}, {"api_name": "bottle.request.files", "line_number": 107, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 107, "usage_type": "name"}, {"api_name": "eu.softfire.tub.core.CoreManagers.Experiment", "line_number": 112, "usage_type": "call"}, {"api_name": "bottle.redirect", "line_number": 113, "usage_type": "call"}, {"api_name": "bottle.request.body.read", "line_number": 114, "usage_type": "call"}, {"api_name": "bottle.request.body", "line_number": 114, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 114, "usage_type": "name"}, {"api_name": "bottle.post", "line_number": 102, "usage_type": "call"}, {"api_name": "bottle.request.forms.get", "line_number": 121, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 121, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 121, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 122, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 122, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 122, "usage_type": "name"}, 
{"api_name": "bottle.request.forms.get", "line_number": 123, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 123, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 123, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 124, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 124, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 124, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 125, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 125, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 125, "usage_type": "name"}, {"api_name": "bottle.request.files.get", "line_number": 126, "usage_type": "call"}, {"api_name": "bottle.request.files", "line_number": 126, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 126, "usage_type": "name"}, {"api_name": "eu.softfire.tub.core.CoreManagers.add_resource", "line_number": 127, "usage_type": "call"}, {"api_name": "bottle.redirect", "line_number": 128, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 118, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 135, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 154, "usage_type": "call"}, {"api_name": "bottle.request.body.read", "line_number": 162, "usage_type": "call"}, {"api_name": "bottle.request.body", "line_number": 162, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 162, "usage_type": "name"}, {"api_name": "bottle.post", "line_number": 159, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 190, "usage_type": "call"}, {"api_name": "bottle.request.environ.get", "line_number": 203, "usage_type": "call"}, {"api_name": "bottle.request", "line_number": 203, "usage_type": "attribute"}, {"api_name": "bottle.route", "line_number": 200, "usage_type": "call"}, {"api_name": "bottle.request.body.read", "line_number": 217, "usage_type": "call"}, {"api_name": "bottle.request.body", "line_number": 217, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 217, "usage_type": "name"}, {"api_name": "bottle.post", "line_number": 213, "usage_type": "call"}, {"api_name": "bottle.HTTPError", "line_number": 228, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.certificate.CertificateGenerator", "line_number": 231, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.certificate.log_certificate_create", "line_number": 232, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 240, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 222, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 250, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 243, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.refresh_user", "line_number": 258, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 258, "usage_type": "name"}, {"api_name": "bottle.HTTPResponse", "line_number": 259, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 253, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.delete_user", "line_number": 267, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 267, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 273, "usage_type": "call"}, {"api_name": 
"bottle.response", "line_number": 274, "usage_type": "attribute"}, {"api_name": "bottle.post", "line_number": 262, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 278, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 285, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.get_all_resources", "line_number": 295, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 295, "usage_type": "name"}, {"api_name": "bottle.response", "line_number": 296, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 297, "usage_type": "call"}, {"api_name": "bottle.get", "line_number": 292, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 303, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 304, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.list_experimenters", "line_number": 304, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 304, "usage_type": "name"}, {"api_name": "bottle.get", "line_number": 300, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.maintenance", "line_number": 310, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 310, "usage_type": "name"}, {"api_name": "bottle.response", "line_number": 311, "usage_type": "attribute"}, {"api_name": "bottle.post", "line_number": 306, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.maintenance", "line_number": 318, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 318, "usage_type": "name"}, {"api_name": "bottle.response", "line_number": 319, "usage_type": "attribute"}, {"api_name": "bottle.post", "line_number": 314, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.list_managers", "line_number": 338, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 338, "usage_type": "name"}, {"api_name": "eu.softfire.tub.core.CoreManagers.list_experimenters", "line_number": 339, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 339, "usage_type": "name"}, {"api_name": "bottle.route", "line_number": 328, "usage_type": "call"}, {"api_name": "bottle.view", "line_number": 330, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 343, "usage_type": "call"}, {"api_name": "bottle.view", "line_number": 344, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.get_other_resources", "line_number": 355, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.get_experiment_dict", "line_number": 356, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.get_resources_dict", "line_number": 360, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.get_resources_dict", "line_number": 361, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 350, "usage_type": "call"}, {"api_name": "bottle.view", "line_number": 351, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.calendar.CalendarManager.get_month", "line_number": 375, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.calendar.CalendarManager", "line_number": 375, "usage_type": "name"}, {"api_name": "bottle.route", "line_number": 370, "usage_type": "call"}, {"api_name": "bottle.view", "line_number": 371, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 380, "usage_type": 
"call"}, {"api_name": "bottle.HTTPError", "line_number": 391, "usage_type": "call"}, {"api_name": "bottle.static_file", "line_number": 392, "usage_type": "call"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 392, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 386, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 399, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 410, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 411, "usage_type": "call"}, {"api_name": "eu.softfire.tub.exceptions.exceptions.ExperimentNotFound", "line_number": 413, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.exceptions.exceptions", "line_number": 413, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 414, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 415, "usage_type": "call"}, {"api_name": "eu.softfire.tub.exceptions.exceptions.ExperimentValidationError", "line_number": 417, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.exceptions.exceptions", "line_number": 417, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 418, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 419, "usage_type": "call"}, {"api_name": "eu.softfire.tub.exceptions.exceptions.ManagerNotFound", "line_number": 421, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.exceptions.exceptions", "line_number": 421, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 422, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 423, "usage_type": "call"}, {"api_name": "eu.softfire.tub.exceptions.exceptions.ResourceAlreadyBooked", "line_number": 425, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.exceptions.exceptions", "line_number": 425, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 426, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 427, "usage_type": "call"}, {"api_name": "eu.softfire.tub.exceptions.exceptions.ResourceNotFound", "line_number": 429, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.exceptions.exceptions", "line_number": 429, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 430, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 431, "usage_type": "call"}, {"api_name": "eu.softfire.tub.exceptions.exceptions.RpcFailedCall", "line_number": 433, "usage_type": "attribute"}, {"api_name": "eu.softfire.tub.exceptions.exceptions", "line_number": 433, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 434, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 435, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 439, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 440, "usage_type": "call"}, {"api_name": "bottle.request", "line_number": 447, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 452, "usage_type": "call"}, {"api_name": "bottle.request.body.read", "line_number": 452, "usage_type": "call"}, {"api_name": "bottle.request.body", "line_number": 452, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 452, "usage_type": "name"}, {"api_name": "bottle.request.POST.get", "line_number": 454, "usage_type": "call"}, {"api_name": "bottle.request", "line_number": 454, "usage_type": "attribute"}, {"api_name": 
"eu.softfire.tub.utils.utils.get_config", "line_number": 458, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 460, "usage_type": "call"}, {"api_name": "os.path", "line_number": 460, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 460, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 462, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 469, "usage_type": "call"}, {"api_name": "eu.softfire.tub.utils.static_config.CONFIGURATION_FOLDER", "line_number": 469, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 469, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 470, "usage_type": "call"}, {"api_name": "eu.softfire.tub.utils.static_config.CONFIGURATION_FOLDER", "line_number": 470, "usage_type": "argument"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 471, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 474, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 476, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 491, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 548, "usage_type": "call"}, {"api_name": "bottle.debug", "line_number": 554, "usage_type": "call"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 555, "usage_type": "call"}, {"api_name": "bottle.install", "line_number": 556, "usage_type": "call"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 559, "usage_type": "call"}, {"api_name": "beaker.middleware.SessionMiddleware", "line_number": 565, "usage_type": "call"}, {"api_name": "bottle.app", "line_number": 565, "usage_type": "call"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 566, "usage_type": "call"}, {"api_name": "beaker.middleware.SessionMiddleware", "line_number": 553, "usage_type": "name"}, {"api_name": "bottle.run", "line_number": 576, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.create_user", "line_number": 588, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 588, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 589, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers.delete_user", "line_number": 596, "usage_type": "call"}, {"api_name": "eu.softfire.tub.core.CoreManagers", "line_number": 596, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 605, "usage_type": "call"}, {"api_name": "bottle.request.remote_addr", "line_number": 610, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 610, "usage_type": "name"}, {"api_name": "bottle.request.remote_addr", "line_number": 611, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 611, "usage_type": "name"}, {"api_name": "eu.softfire.tub.utils.utils.get_config", "line_number": 613, "usage_type": "call"}, {"api_name": "bottle.request.json.get", "line_number": 614, "usage_type": "call"}, {"api_name": "bottle.request.json", "line_number": 614, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 614, "usage_type": "name"}, {"api_name": "bottle.HTTPResponse", "line_number": 616, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 617, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 619, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 608, "usage_type": "call"}]} +{"seq_id": "22423189", "text": "import 
random\nimport bisect\nimport math\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy\n\ndef linspace(start, stop, n_bins=50):\n    result = list()\n    for i in range(n_bins):\n        result.append(start + i*(stop-start)/float(n_bins-1))\n    return(tuple(result))\n\ndef logspace(start, stop, n_bins=50):\n    return(tuple(map(lambda x: 10**x,\n                     linspace(math.log10(start),\n                              math.log10(stop),\n                              n_bins))))\n\ndef dx(L):\n    \"\"\"\n    Return the mean of every pair of consecutive elements.\n    \"\"\"\n    iterable = iter(L)\n    first = next(iterable)\n\n    for second in iterable:\n        yield(float(first + second)/2)\n        first = second\n    \ndef normalized(L):\n    my_sum = float(sum(L))\n\n    return(map(lambda x: x/my_sum, L))\n\ndef cumulative_sum(L):\n    total = 0\n\n    for elem in L:\n        total += elem\n        yield(total)\n\ndef get_bin_index(bins, value):\n    # Find the leftmost insertion point for value among the bin boundaries.\n    index = bisect.bisect_left(bins, value)\n    if index == len(bins):\n        # bins represent lower and upper bounds, so if we hit the\n        # uppermost bound we can count that as part of the uppermost bin\n        return(index-1)\n    else:\n        return(index)\n\nclass Distribution:\n    \"\"\"\n    Implements a probability distribution over a continuous range\n    with weights associated with the given bins. Once initialized,\n    draw() can be used to draw values efficiently from the distribution.\n\n    If you have to use a non-linear scale for bins, use scale and scale_invert\n    functions which linearize the scale to ensure appropriate sampling.\n    \"\"\"\n    def __init__(self, bins, weights):\n        if len(bins)-1 != len(weights) or len(bins) < 2:\n            raise(ValueError(\"Must have one more bin boundary than weight.\"))\n\n        self.bins = bins\n        self.weights = tuple(normalized(weights))\n        self.cumulative = tuple(cumulative_sum(self.weights))\n\n    def bin_centers(self):\n        return(self.bins[:-1]+numpy.diff(self.bins))\n\n    def draw(self):\n        x = random.random()\n        bin_index = get_bin_index(self.cumulative, x)\n        lower = self.bins[bin_index]\n        upper = self.bins[bin_index+1]\n        return(lower + (upper-lower)*random.random())\n\n    def draw_bin(self):\n        x = random.random()\n        bin_index = get_bin_index(self.cumulative, x)\n        lower = self.bins[bin_index]\n        upper = self.bins[bin_index+1]\n        return(lower, upper)\n\n    def plot(self):\n        plt.clf()\n        plt.plot(self.bin_centers(), self.weights)\n        plt.show()\n\n    def loglog(self):\n        plt.clf()\n        plt.loglog(self.bin_centers(), self.weights, marker=\"x\")\n        plt.show()\n\ndef power_law(exponent, lower, upper, n_bins=50):\n    \"\"\"\n    Return a power law distribution on the given limits, using the given\n    exponent.\n\n    f(x) = x^exponent\n    \"\"\"\n    bins = logspace(lower, upper, n_bins)\n    weights = tuple(map(lambda x: x**exponent, dx(bins)))\n    scale = lambda x: math.log10(x)\n    scale_invert = lambda x: 10**x\n    return(Distribution(bins, weights))#, scale, scale_invert))\n\ndef exponential(exponent, lower, upper, n_bins=50):\n    \"\"\"\n    Return an exponential distribution on the given limits, with the given\n    exponent.\n\n    f(x) = exp(exponent*x)\n    \"\"\"\n    bins = linspace(lower, upper, n_bins)\n    weights = tuple(map(lambda x: math.exp(exponent*x), dx(bins)))\n    return(Distribution(bins, weights))\n\ndef poisson(x):\n    L = math.exp(-x)\n    k = 0\n    p = 1\n\n    while p > L:\n        k += 1\n        p *= random.random()\n\n    return(k-1)\n", "sub_path": "dot_emission/Distribution.py", "file_name": "Distribution.py", "file_ext": "py", "file_size_in_byte": 3603, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "math.log10", "line_number": 
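A brief usage sketch for the Distribution class in the record above; the exponent, limits, and sample count are illustrative values, not ones taken from the original project:

# Assumes the code above is saved as Distribution.py, per the record's file_name.
from Distribution import power_law

dist = power_law(exponent=-2.0, lower=1.0, upper=100.0, n_bins=50)

# draw() picks a bin in proportion to its weight, then samples uniformly
# inside that bin, so every value lands in [lower, upper].
samples = [dist.draw() for _ in range(10000)]
print(min(samples), max(samples))

# draw_bin() returns the boundaries of the randomly chosen bin instead.
low, high = dist.draw_bin()
print(low, high)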
17, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 18, "usage_type": "call"}, {"api_name": "bisect.bisect_left", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 72, "usage_type": "call"}, {"api_name": "random.random", "line_number": 75, "usage_type": "call"}, {"api_name": "random.random", "line_number": 79, "usage_type": "call"}, {"api_name": "random.random", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.loglog", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "math.log10", "line_number": 107, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 119, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 123, "usage_type": "call"}, {"api_name": "random.random", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "183215057", "text": "import os\nimport time\nimport urllib.request\nfrom datetime import datetime\nimport re\n\ndef logging(msg):\n    from time import gmtime, strftime\n    print(strftime(datetime.now().strftime('%Y-%m-%d %H:%M:%S')), msg)\n\nclass WatchDog:\n    def __init__(self, hashrate_threshold, timeout=.5):\n        self.hashrate_threshold = hashrate_threshold\n        self.timeout = timeout\n        self.drop_cnt = 0\n\n    def loop(self):\n        fail_cnt = 0\n        is_activate = False\n        logging(\"watchdog started\")\n        while True:\n            try:\n                with urllib.request.urlopen('http://127.0.0.1:10240', timeout=self.timeout) as response:\n                    html = response.read().decode('utf-8')\n                    self.check_hashrate(html, self.hashrate_threshold)\n                    if fail_cnt > 0:\n                        fail_cnt -= 1\n                    if not is_activate:\n                        is_activate = True\n            except Exception as e:\n                logging(\"failed, %s\" %e)\n                if is_activate:\n                    fail_cnt += 1\n                    logging(\"WARNING: fail cnt %d\" % fail_cnt)\n            if (fail_cnt >= 5) and is_activate:\n                logging(\"restart\")\n                restart()\n                is_activate = False\n            time.sleep(5)\n\n    def check_hashrate(self, html, threshold):\n        try:\n            current = float(re.search(r\"Totals:[ ]*([.0-9]*)\", html).group(1))\n        except Exception as e:\n            current = 0\n        logging(\"current %.2f\" % current)\n        if (current > 1) and current < float(threshold):\n            self.drop_cnt += 1\n            if self.drop_cnt > 9:\n                logging(\"restart\")\n                restart()\n        elif current >= float(threshold):\n            if self.drop_cnt > 0:\n                self.drop_cnt -= 1\n\n\ndef restart():\n    os.system(\"taskkill /f /im xmr-stak.exe\")\n    os.system(\"taskkill /f /im AdlCtrl.exe\")\n    time.sleep(5)\n    os.system(\"mining.bat\")\n    exit()\n\n\nif __name__ == '__main__':\n    import sys\n    a=WatchDog(sys.argv[1])\n    a.loop()\n", "sub_path": "xmv_watchdog/watchdog.py", "file_name": "watchdog.py", "file_ext": "py", "file_size_in_byte": 2099, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
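A standalone check of the hashrate parse used by WatchDog.check_hashrate above. The sample status line is a guessed stand-in for the miner's HTML report page, not captured output:

import re

html = "Totals:   512.3  510.9  0.0 H/s"  # hypothetical status-page fragment

# Same expression as check_hashrate: grab the first number after "Totals:".
current = float(re.search(r"Totals:[ ]*([.0-9]*)", html).group(1))
print(current)  # 512.3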
"code-starcoder2", "pt": "59", "api": [{"api_name": "time.strftime", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 23, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 23, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "re.search", "line_number": 43, "usage_type": "call"}, {"api_name": "os.system", "line_number": 58, "usage_type": "call"}, {"api_name": "os.system", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "os.system", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 67, "usage_type": "attribute"}]} +{"seq_id": "160763532", "text": "''' \r\nympy bespoke measurement pipelines.\r\nFollow the examples here to create your own\r\n'''\r\n\r\nimport pickle\r\nimport numpy as np\r\nimport datetime\r\nimport os\r\n\r\nimport ympy\r\nfrom ympy.parameterSets import YmpyParam\r\n\r\nclass GFPwMarkerPipeline():\r\n \r\n #%% run the pipeline main function\r\n \r\n def runPipeline(self):\r\n self.history.append('ran pipeline at ' + str(datetime.datetime.now()))\r\n self.setupResultsfolder()\r\n self.scaleBrightness()\r\n total = self.analyzeRange()[1] - self.analyzeRange()[0]\r\n self.history.append(['entered main loop'])\r\n for field in range(*self.analyzeRange()):\r\n self.current_field = field\r\n print('field #{} of {} total'.format(field, total))\r\n self.readCurrentField()\r\n self.segmentImage()\r\n self.buildMeasurementMasks()\r\n self.measureSingleField()\r\n\r\n self.saveState()\r\n\r\n def __init__(self, experiment_parameter_dict):\r\n # setup parameters. Pulls default parameters from ympy.parameterSets\r\n # during initiation of YmpyParam object. 
Pass a dictionary of \r\n        # experiment specific parameters following template_GFPwMarker_params\r\n        self.Param = YmpyParam(experiment_parameter_dict)\r\n        self.folder_data = ympy.batchParse(\r\n            self.Param.folder_path, self.Param.exp_ID_loc,\r\n            self.Param.image_extension)\r\n        # initialize experiment variables\r\n        self.totalResults = []\r\n        self.total_bool_masks = []\r\n        self.fieldsAnalyzed = []\r\n        self.totalMcl = []\r\n        self.history = ['initialized']\r\n        self.current_field = []\r\n        self.field_log = []\r\n        self.image = []\r\n        self.unbuffered_master_cell_label = []\r\n        self.buffered_master_cell_label = []\r\n        # status flags\r\n        self._found_results_folder = False\r\n        self._scaled_brightness = False\r\n        self._read_field = False\r\n        self._segmented_image = False\r\n        self._made_masks = False\r\n        self._measured = False\r\n        self._saved = False\r\n        # order to run functions in\r\n        self.order = [\r\n            'setupResultsfolder',\r\n            'scaleBrightness',\r\n            'readCurrentField',\r\n            'segmentImage',\r\n            'buildMeasurementMasks',\r\n            'measureSingleField',\r\n            'saveState'\r\n            ]\r\n    \r\n    def help(self):\r\n        print(' ---ympy pipeline object---\\n',\r\n              'Usage:\\n',\r\n              'Reference and set parameters from Param attribute.\\n',\r\n              'GFPwMarkerPipeline.Param.listParameters() to',\r\n              'view current parameters\\n',\r\n              'GFPwMarkerPipeline.runPipeline() to process all files in\\n',\r\n              ' GFPwMarkerPipeline.Param.target_folder')\r\n    \r\n    #%% setup folder \r\n    \r\n    def setupResultsfolder(self): \r\n        print('beginning analysis of \\n{}\\n at {}'.format(\r\n            self.Param.folder_path, datetime.datetime.now()))\r\n        resultsDirectory = self.Param.folder_path + '/results/'\r\n        if not os.path.exists(resultsDirectory):\r\n            self.history.append('created results folder')\r\n            os.makedirs(resultsDirectory)\r\n        self._found_results_folder = True\r\n\r\n    #%% measure global values (slow for big datasets)\r\n    \r\n    def scaleBrightness(self):\r\n        self.checkState('scaleBrightness')\r\n        self.history.append('scaled experiment brightness values')\r\n        self.global_extrema = {}\r\n        self.global_extrema['green'] = ympy.batchIntensityScale(\r\n            self.folder_data,\r\n            self.Param.image_reader,\r\n            self.Param.reader_args,\r\n            self.Param.green_channel,\r\n            self.Param.show_progress)\r\n        self.global_extrema['red'] = ympy.batchIntensityScale(\r\n            self.folder_data,\r\n            self.Param.image_reader,\r\n            self.Param.reader_args,\r\n            self.Param.red_channel,\r\n            self.Param.show_progress)\r\n        self._scaled_brightness = True\r\n    \r\n\r\n    #%% read image with readerHelper and image_reader\r\n    \r\n    def readCurrentField(self):\r\n        # begin tracking current field\r\n        self.field_log = self.history.pop()\r\n        self.checkState('readCurrentField')\r\n        # read image\r\n        field_path = self.folder_data['path_list'][self.current_field]\r\n        self.image = self.Param.image_reader(\r\n            **ympy.helpers.readerHelper(\r\n                self.Param.image_reader,\r\n                field_path,\r\n                self.Param.reader_args))\r\n        self.image = ympy.helpers.cropRolloff(\r\n            self.image,\r\n            self.Param.image_rolloff)\r\n        self.field_log.append('read image for field #'+str(self.current_field))\r\n        self._read_field = True\r\n        # must resegment, mask, and measure after calling readCurrentField to\r\n        # avoid measurement/masking mismatch\r\n        self._segmented_image = False\r\n        self._made_masks = False\r\n        self._measured = False\r\n        self._saved = False\r\n    \r\n    #%% find cells and cleanup morphology\r\n    \r\n    def segmentImage(self):\r\n        self.checkState('segmentImage')\r\n        # find cells from brightfield step 1\r\n        bw_cell_zstack = ympy.makeCellzStack( \r\n            
self.image,\r\n self.Param.bf_channel,\r\n self.Param.show_progress)\r\n # find cells from brightfield step 2\r\n nZslices = self.image.shape[1]\r\n for z in range(nZslices):\r\n bw_cell_zstack[z, :, :] = ympy.helpers.correctBFanomaly(\r\n bw_cell_zstack[z, :, :],\r\n self.Param.bf_offest_vector)\r\n # find cells from brightfield step 3\r\n raw_mcl = ympy.cellsFromZstack(\r\n bw_cell_zstack,\r\n self.Param.show_progress)[0]\r\n # find cells from brightfield step 4\r\n self.unbuffered_master_cell_label = ympy.bfCellMorphCleanup( \r\n raw_mcl, \r\n self.Param.show_progress, \r\n self.Param.min_angle, \r\n self.Param.min_length, \r\n self.Param.closing_radius,\r\n self.Param.min_bud_size)\r\n self.field_log.append('segmented image for field #'\r\n + str(self.current_field))\r\n self.ncells = np.max(self.unbuffered_master_cell_label)\r\n self.field_log.append(\r\n 'found {} cells in field #{}'.format(\r\n self.ncells,\r\n self.current_field))\r\n self._segmented_image = True\r\n\r\n #%% define measurment masks\r\n \r\n def buildMeasurementMasks(self):\r\n self.checkState('buildMeasurementMasks')\r\n # unbufferedMcl is the best guess at the 'true outside edge' of \r\n # the cells; use it as the starting point to find a 10pixel thick \r\n # cortex\r\n inner_cortex_mcl = ympy.labelCortex_mcl( \r\n self.unbuffered_master_cell_label,\r\n self.Param.cortex_width)\r\n # because the bright field and fluorescence are not perfectly \r\n # aligned, and to handle inaccuracies in edge finding, also buffer \r\n # out from the outside edge\r\n buffer = ympy.buffer_mcl(\r\n self.unbuffered_master_cell_label,\r\n self.Param.buffer_size,\r\n self.Param.show_progress)\r\n # merge this buffer onto the unbuffered_master_cell_label and the \r\n # inner_cortex_mcl\r\n self.buffered_master_cell_label = ympy.merge_labelMcl(\r\n self.unbuffered_master_cell_label,\r\n buffer) \r\n full_cortex_mcl = ympy.merge_labelMcl(\r\n inner_cortex_mcl,\r\n buffer)\r\n # use Otsu thresholding on the max projection of RFPmarker\r\n marker_mcl_otsu = ympy.labelMaxproj(\r\n self.buffered_master_cell_label,\r\n self.image,\r\n self.Param.marker_channel)\r\n # then use centroidCircles to uniformly mask peri-golgi regions\r\n marker_mcl_ccadjusted = ympy.centroidCirclesMcl( \r\n marker_mcl_otsu.astype('bool'), \r\n self.buffered_master_cell_label,\r\n self.Param.marker_radius, \r\n self.Param.marker_circle_iterations)\r\n # subtract so that marker localization has precedence over cortical\r\n # localization\r\n cortex_mcl_nonmarker = ympy.subtract_labelMcl( \r\n full_cortex_mcl, \r\n marker_mcl_ccadjusted)\r\n # finally, compute mask for remaining cytoplasmic regions\r\n cytoplasm_mcl = ympy.subtract_labelMcl(\r\n self.buffered_master_cell_label,\r\n ympy.merge_labelMcl(\r\n marker_mcl_ccadjusted,\r\n cortex_mcl_nonmarker))\r\n self.ref_mcl_dict = {\r\n 'cortex(non-{})'.format(self.Param.marker_name):\r\n cortex_mcl_nonmarker,\r\n '{}(circles)'.format(self.Param.marker_name):\r\n marker_mcl_ccadjusted,\r\n 'cytoplasm': cytoplasm_mcl}\r\n self.field_log.append('built measurement masks for field #'\r\n + str(self.current_field))\r\n self.bool_masks = {\r\n 'cortex(non-{})_mask'.format(self.Param.marker_name):\r\n cortex_mcl_nonmarker.astype(bool),\r\n '{}(circles)_mask'.format(self.Param.marker_name):\r\n marker_mcl_ccadjusted.astype(bool),\r\n 'unbuffered_mask':\r\n self.unbuffered_master_cell_label.astype(bool)\r\n }\r\n self._made_masks = True\r\n \r\n #%% measure\r\n \r\n def measureSingleField(self):\r\n 
self.checkState('measureSingleField')\r\n        # measure Art1-mNG in the middle z-slice\r\n        primaryImage = {\r\n            self.Param.measured_protein_name:\r\n                self.image[self.Param.measured_protein_channel,\r\n                           self.Param.measured_protein_z, :, :]}\r\n        # measure against buffered cortex (minus marker mask), marker, and \r\n        # cytoplasm\r\n        # also record field wide information\r\n        # measurement function\r\n        results = ympy.measure_cells(\r\n            primaryImage,\r\n            self.buffered_master_cell_label,\r\n            self.ref_mcl_dict,\r\n            self.folder_data['imagename_list'][self.current_field],\r\n            self.folder_data['expID_list'][self.current_field],\r\n            self.current_field,\r\n            self.global_extrema['green']['globalmin'],\r\n            self.global_extrema['green']['globalmax'],\r\n            self.Param.n_hist_bins,\r\n            self.Param.show_progress)\r\n        for cell in range(self.ncells):\r\n            hist_scores = ympy.cortex_marker_histScore(\r\n                results[cell],\r\n                self.Param)\r\n            results[cell].update(hist_scores)\r\n        # add measurements from each field to total results\r\n        self.field_log.append('measured fluorescence for field #'\r\n                              + str(self.current_field))\r\n        self.totalResults = list(np.concatenate((self.totalResults, results)))\r\n        self._measured = True\r\n    \r\n    #%% pool and save\r\n\r\n    def saveState(self):\r\n        self.checkState('saveState')\r\n        print('saving progress')\r\n        self.fieldsAnalyzed.append(self.current_field)\r\n        self.totalMcl.append(self.buffered_master_cell_label)\r\n        self.total_bool_masks.append(self.bool_masks)\r\n        resultsDic = {\r\n            'totalResults': self.totalResults,\r\n            'fieldsAnalyzed': self.fieldsAnalyzed,\r\n            'totalMcl': self.totalMcl,\r\n            'parameters': self.Param.listParameters(),\r\n            'object_history': self.history,\r\n            'total_bool_masks': self.total_bool_masks\r\n            }\r\n        date_today = str(datetime.datetime.now().date())\r\n        save_path = '{}/results/{}_analysis.p'.format(\r\n            self.Param.folder_path, date_today)\r\n        pickle.dump(resultsDic, open(save_path, 'wb'))\r\n        print(self.folder_data['imagename_list'][self.current_field],\r\n              ' complete at ', datetime.datetime.now())\r\n        self.field_log.append('saved state after analysis of field #'\r\n                              + str(self.current_field))\r\n        \r\n        self.history.append(self.field_log)\r\n        self._saved = True\r\n    \r\n    #%% helper methods\r\n    def checkState(self, state_function_name):\r\n        state = [self._found_results_folder,\r\n                 self._scaled_brightness,\r\n                 self._read_field,\r\n                 self._segmented_image,\r\n                 self._made_masks,\r\n                 self._measured,\r\n                 self._saved\r\n                 ]\r\n        position = self.order.index(state_function_name)\r\n        error_text_1 = ('\\nmust call runPipeline,\\nor call main pipeline '\r\n                        'functions in order:\\n')\r\n        error_text_2 = ',\\n'.join('{}: {}'.format(num + 1, val)\r\n                                  for num, val in enumerate(self.order))\r\n        error_text_3 = '\\nattempted to call {} before calling {}'.format(\r\n            state_function_name, ', '.join(\r\n                np.array(self.order[0:position])[~np.array(\r\n                    state[0:position])]))\r\n        error_text = error_text_1 + error_text_2 + error_text_3\r\n        if not all(state[0:position]):\r\n            raise Exception(error_text)\r\n        if self._saved:\r\n            if state_function_name != 'readCurrentField':\r\n                error_text = ('analysis finished for field {}, use '\r\n                              'current_field and readCurrentField to '\r\n                              'initialize analysis of a new field').format(\r\n                                  self.current_field)\r\n                raise Exception(error_text)\r\n    \r\n    \r\n    def analyzeRange(self):\r\n        if self.Param.measure_fields == 'all':\r\n            start = 0\r\n            stop = self.folder_data['n_fields']\r\n        else:\r\n            start = self.Param.measure_fields[0]\r\n            stop = self.Param.measure_fields[1]\r\n        
return(start, stop)\r\n ", "sub_path": "ympy/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 14495, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "ympy.parameterSets.YmpyParam", "line_number": 38, "usage_type": "call"}, {"api_name": "ympy.batchParse", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 89, "usage_type": "call"}, {"api_name": "ympy.batchIntensityScale", "line_number": 98, "usage_type": "call"}, {"api_name": "ympy.batchIntensityScale", "line_number": 104, "usage_type": "call"}, {"api_name": "ympy.helpers.readerHelper", "line_number": 122, "usage_type": "call"}, {"api_name": "ympy.helpers", "line_number": 122, "usage_type": "attribute"}, {"api_name": "ympy.helpers.cropRolloff", "line_number": 126, "usage_type": "call"}, {"api_name": "ympy.helpers", "line_number": 126, "usage_type": "attribute"}, {"api_name": "ympy.makeCellzStack", "line_number": 143, "usage_type": "call"}, {"api_name": "ympy.helpers.correctBFanomaly", "line_number": 150, "usage_type": "call"}, {"api_name": "ympy.helpers", "line_number": 150, "usage_type": "attribute"}, {"api_name": "ympy.cellsFromZstack", "line_number": 154, "usage_type": "call"}, {"api_name": "ympy.bfCellMorphCleanup", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 167, "usage_type": "call"}, {"api_name": "ympy.labelCortex_mcl", "line_number": 181, "usage_type": "call"}, {"api_name": "ympy.buffer_mcl", "line_number": 187, "usage_type": "call"}, {"api_name": "ympy.merge_labelMcl", "line_number": 193, "usage_type": "call"}, {"api_name": "ympy.merge_labelMcl", "line_number": 196, "usage_type": "call"}, {"api_name": "ympy.labelMaxproj", "line_number": 200, "usage_type": "call"}, {"api_name": "ympy.centroidCirclesMcl", "line_number": 205, "usage_type": "call"}, {"api_name": "ympy.subtract_labelMcl", "line_number": 212, "usage_type": "call"}, {"api_name": "ympy.subtract_labelMcl", "line_number": 216, "usage_type": "call"}, {"api_name": "ympy.merge_labelMcl", "line_number": 218, "usage_type": "call"}, {"api_name": "ympy.measure_cells", "line_number": 252, "usage_type": "call"}, {"api_name": "ympy.cortex_marker_histScore", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 271, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 290, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 290, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 293, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 295, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 295, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "144000661", "text": "from __future__ import division\nimport argparse\nimport os\nimport tqdm\nimport random\nfrom visdom import Visdom\nimport torch\nimport numpy as np\nfrom IPython import embed\nfrom 
collections import OrderedDict\nimport re\nimport mmcv\nfrom mmcv import Config\nfrom mmcv.runner import DistSamplerSeedHook, Runner, obj_from_dict\nfrom mmcv.runner import get_dist_info, load_checkpoint\nfrom mmdet import __version__\nfrom mmdet.apis import (get_root_logger, init_dist, set_random_seed,\n                        train_detector)\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmdet.datasets import build_dataset, DATASETS, build_dataloader\nfrom mmdet.models import build_detector\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Train a detector')\n    parser.add_argument('config', help='train config file path')\n    parser.add_argument('--work_dir', help='the dir to save logs and models')\n    parser.add_argument(\n        '--resume_from', help='the checkpoint file to resume from')\n    parser.add_argument('--vis', action='store_true',\n                        help='whether to visualize results')\n    parser.add_argument(\n        '--validate',\n        action='store_true',\n        help='whether to evaluate the checkpoint during training')\n    parser.add_argument(\n        '--gpus',\n        type=int,\n        default=1,\n        help='number of gpus to use '\n        '(only applicable to non-distributed training)')\n    parser.add_argument('--seed', type=int, default=None, help='random seed')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    parser.add_argument(\n        '--autoscale-lr',\n        action='store_true',\n        help='automatically scale lr with the number of gpus')\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n    return args\n\ndef build_optimizer(model, optimizer_cfg):\n    \"\"\"Build optimizer from configs.\n\n    Args:\n        model (:obj:`nn.Module`): The model with parameters to be optimized.\n        optimizer_cfg (dict): The config dict of the optimizer.\n            Positional fields are:\n                - type: class name of the optimizer.\n                - lr: base learning rate.\n            Optional fields are:\n                - any arguments of the corresponding optimizer type, e.g.,\n                  weight_decay, momentum, etc.\n                - paramwise_options: a dict with 3 accepted fields\n                  (bias_lr_mult, bias_decay_mult, norm_decay_mult).\n                  `bias_lr_mult` and `bias_decay_mult` will be multiplied to\n                  the lr and weight decay respectively for all bias parameters\n                  (except for the normalization layers), and\n                  `norm_decay_mult` will be multiplied to the weight decay\n                  for all weight and bias parameters of normalization layers.\n\n    Returns:\n        torch.optim.Optimizer: The initialized optimizer.\n    \"\"\"\n    if hasattr(model, 'module'):\n        model = model.module\n    \n    optimizer_cfg = optimizer_cfg.copy()\n    paramwise_options = optimizer_cfg.pop('paramwise_options', None)\n    # if no paramwise option is specified, just use the global setting\n    if paramwise_options is None:\n        return obj_from_dict(optimizer_cfg, torch.optim,\n                             dict(params=model.parameters()))\n    else:\n        assert isinstance(paramwise_options, dict)\n        # get base lr and weight decay\n        base_lr = optimizer_cfg['lr']\n        base_wd = optimizer_cfg.get('weight_decay', None)\n        # weight_decay must be explicitly specified if mult is specified\n        if ('bias_decay_mult' in paramwise_options\n                or 'norm_decay_mult' in paramwise_options):\n            assert base_wd is not None\n        # get param-wise options\n        bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.)\n        bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.)\n        norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.)\n        # set param-wise lr and weight decay\n        params = []\n        
for name, param in model.named_parameters():\n            param_group = {'params': [param]}\n            if not param.requires_grad:\n                # FP16 training needs to copy gradient/weight between master\n                # weight copy and model weight, it is convenient to keep all\n                # parameters here to align with model.parameters()\n                params.append(param_group)\n                continue\n\n            # for norm layers, overwrite the weight decay of weight and bias\n            # TODO: obtain the norm layer prefixes dynamically\n            if re.search(r'(bn|gn)(\\d+)?.(weight|bias)', name):\n                if base_wd is not None:\n                    param_group['weight_decay'] = base_wd * norm_decay_mult\n            # for other layers, overwrite both lr and weight decay of bias\n            elif name.endswith('.bias'):\n                param_group['lr'] = base_lr * bias_lr_mult\n                if base_wd is not None:\n                    param_group['weight_decay'] = base_wd * bias_decay_mult\n            # otherwise use the global settings\n\n            params.append(param_group)\n        \n        optimizer_cls = getattr(torch.optim, optimizer_cfg.pop('type'))\n        return optimizer_cls(params, **optimizer_cfg)\n\ndef parse_losses(losses):\n    log_vars = OrderedDict()\n    for loss_name, loss_value in losses.items():\n        if isinstance(loss_value, torch.Tensor):\n            log_vars[loss_name] = loss_value.mean()\n        elif isinstance(loss_value, list):\n            log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n        else:\n            raise TypeError(\n                '{} is not a tensor or list of tensors'.format(loss_name))\n\n    loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)\n\n    log_vars['loss'] = loss\n    for name in log_vars:\n        log_vars[name] = log_vars[name].item()\n\n    return loss, log_vars\n\ndef weights_to_cpu(state_dict):\n    \"\"\"Copy a model state_dict to cpu.\n\n    Args:\n        state_dict (OrderedDict): Model weights on GPU.\n\n    Returns:\n        OrderedDict: Model weights on CPU.\n    \"\"\"\n    state_dict_cpu = OrderedDict()\n    for key, val in state_dict.items():\n        state_dict_cpu[key] = val.cpu()\n    return state_dict_cpu\n\ndef main():\n    args = parse_args()\n    if args.vis:\n        vis = Visdom(env=args.model_name)\n        clsloss_win = vis.line(X=torch.zeros((1,)).cpu(), Y=torch.zeros((1)).cpu(),\n                               opts=dict(xlabel='image_number', ylabel='cls_loss', title='cls_loss',\n                                         legend=['cls_Loss']))\n        locloss_win = vis.line(X=torch.zeros((1,)).cpu(), Y=torch.zeros((1)).cpu(),\n                               opts=dict(xlabel='image_number', ylabel='loc_loss', title='loc_loss',\n                                         legend=['loc_Loss']))\n    cfg = Config.fromfile(args.config)\n    # set cudnn_benchmark\n    if cfg.get('cudnn_benchmark', False):\n        torch.backends.cudnn.benchmark = True\n    # update configs according to CLI args\n    if args.work_dir is not None:\n        cfg.work_dir = args.work_dir\n    if args.resume_from is not None:\n        cfg.resume_from = args.resume_from\n    cfg.gpus = args.gpus\n\n    if args.autoscale_lr:\n        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)\n        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8\n\n    # init distributed env first, since logger depends on the dist info.\n    if args.launcher == 'none':\n        distributed = False\n    else:\n        distributed = True\n        init_dist(args.launcher, **cfg.dist_params)\n\n    # init logger before other steps\n    logger = get_root_logger(cfg.log_level)\n    logger.info('Distributed training: {}'.format(distributed))\n\n    # set random seeds\n    if args.seed is not None:\n        logger.info('Set random seed to {}'.format(args.seed))\n        set_random_seed(args.seed)\n    # print('train')\n    # embed()\n    datasets = [build_dataset(cfg.data.train)]\n    if len(cfg.workflow) == 2:\n        datasets.append(build_dataset(cfg.data.val))\n    model = build_detector(\n        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)\n    if 
cfg.load_from:\n        checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')\n        # old versions did not save class info in checkpoints, this workaround is\n        # for backward compatibility\n        if 'CLASSES' in checkpoint['meta']:\n            model.CLASSES = checkpoint['meta']['CLASSES']\n        else:\n            model.CLASSES = datasets[0].CLASSES\n    \n    if cfg.checkpoint_config is not None:\n        # save mmdet version, config file content and class names in\n        # checkpoints as meta data\n        cfg.checkpoint_config.meta = dict(\n            mmdet_version=__version__,\n            config=cfg.text,\n            CLASSES=datasets[0].CLASSES)\n    # add an attribute for visualization convenience\n    model.CLASSES = datasets[0].CLASSES\n\n    \n    data_loader = build_dataloader(\n        datasets[0],\n        imgs_per_gpu=cfg.data.imgs_per_gpu,\n        workers_per_gpu=cfg.data.workers_per_gpu,\n        num_gpus=1,\n        dist=False,\n        shuffle=False)\n    # put model on gpus\n    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()\n    model.train()\n    \n    optimizer = build_optimizer(model, cfg.optimizer)\n    \n    check_video=None\n    num_per_epoch=len(data_loader)//cfg.total_epochs\n    start_epoch=0\n    meta=None\n    epoch=start_epoch\n    for e in range(cfg.total_epochs):\n        for i, data in enumerate(data_loader):\n            # if epoch != i // num_per_epoch + start_epoch:\n            #     epoch = i // num_per_epoch + start_epoch\n            \n            if len(data['gt_bboxes'].data[0][0]) == 0:\n                continue\n            reference_id=(data['img_meta'].data[0][0]['filename'].split('/')[-1]).split('.')[0]\n            video_id=data['img_meta'].data[0][0]['filename'].split('/')[-2]\n            before=max(i-13,i-int(reference_id))\n            # after=min(i)\n            reference=data['img'].data[0]\n            if epoch>5:\n                j=random.randint(before,i)\n                support=(datasets[0][j]['img'].data).unsqueeze(0)\n                support_id=(datasets[0][j]['img_meta'].data['filename'].split('/')[-1]).split('.')[0]\n                svideo_id=(datasets[0][j]['img_meta'].data['filename'].split('/')[-2])\n            else:\n                support=reference\n                support_id=reference_id\n                svideo_id=video_id\n            \n            # data['img']=torch.cat([support,reference],dim=0)\n            \n            losses=model(return_loss=True, **data)\n            \n            loss, log_vars = parse_losses(losses)\n            if np.isnan(loss.item()):\n                embed()\n                exit()\n            \n            optimizer.zero_grad()\n            loss.backward(retain_graph=False)\n            optimizer.step()\n            # if np.isnan(loss.item()):\n            #     loss.backward(retain_graph=False)\n            #     optimizer.zero_grad()\n            #     continue\n            # optimizer.zero_grad()\n            # loss.backward(retain_graph=False)\n            # optimizer.step()\n            \n            if epoch % 1 == 0:\n                if meta is None:\n                    meta = dict(epoch=epoch + 1, iter=i)\n                else:\n                    meta.update(epoch=epoch + 1, iter=i)\n                checkpoint = {\n                    'meta': meta,\n                    'state_dict': weights_to_cpu(model.state_dict())\n                }\n                if optimizer is not None:\n                    checkpoint['optimizer'] = optimizer.state_dict()\n                mmcv.mkdir_or_exist(os.path.dirname(args.work_dir))\n                filename=os.path.join(args.work_dir,'epoch_{}.pth'.format(epoch))\n                torch.save(checkpoint,filename)\n            \n            print(args.work_dir.split('/')[-2],'i:',i,'epoch:',epoch,'video_id:',video_id,'support_id:',support_id,'reference_id:',reference_id,'loss_rpn_cls:',log_vars['loss_rpn_cls'],'loss_rpn_bbox:',log_vars['loss_rpn_bbox'],\n                  'loss_cls:',log_vars['loss_cls'],'acc:',log_vars['acc'],'loss_bbox:',log_vars['loss_bbox'])\n            \n            epoch+=1\n    \n    \n    # train_detector(\n    #     model,\n    #     datasets,\n    #     cfg,\n    #     distributed=distributed,\n    #     validate=args.validate,\n    #     logger=logger)\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "DBT/tools/train_stsn_r101.py", "file_name": "train_stsn_r101.py", "file_ext": "py", "file_size_in_byte": 12693, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
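An illustrative paramwise_options config for the build_optimizer helper defined earlier in this record. The toy model and multiplier values are made up, and build_optimizer itself is assumed to be in scope from the script above:

import torch.nn as nn


class ToyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 8, 3, bias=True)  # conv1.bias takes the bias branch
        self.bn1 = nn.BatchNorm2d(8)                # bn1.* matches the (bn|gn) pattern


optimizer_cfg = dict(
    type='SGD', lr=0.02, momentum=0.9, weight_decay=1e-4,
    paramwise_options=dict(
        bias_lr_mult=2.0,      # biases train at twice the base lr
        bias_decay_mult=0.0,   # no weight decay on biases
        norm_decay_mult=0.0))  # no weight decay on norm-layer parameters

optimizer = build_optimizer(ToyNet(), optimizer_cfg)
for group in optimizer.param_groups:
    print(group['lr'], group['weight_decay'])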
"code-starcoder2", "pt": "59", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mmcv.runner.obj_from_dict", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 128, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 134, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 159, "usage_type": "call"}, {"api_name": "visdom.Visdom", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 171, "usage_type": "call"}, {"api_name": "mmcv.Config.fromfile", "line_number": 174, "usage_type": "call"}, {"api_name": "mmcv.Config", "line_number": 174, "usage_type": "name"}, {"api_name": "torch.backends", "line_number": 177, "usage_type": "attribute"}, {"api_name": "mmdet.apis.init_dist", "line_number": 194, "usage_type": "call"}, {"api_name": "mmdet.apis.get_root_logger", "line_number": 197, "usage_type": "call"}, {"api_name": "mmdet.apis.set_random_seed", "line_number": 203, "usage_type": "call"}, {"api_name": "mmdet.datasets.build_dataset", "line_number": 206, "usage_type": "call"}, {"api_name": "mmdet.datasets.build_dataset", "line_number": 208, "usage_type": "call"}, {"api_name": "mmdet.models.build_detector", "line_number": 209, "usage_type": "call"}, {"api_name": "mmcv.runner.load_checkpoint", "line_number": 212, "usage_type": "call"}, {"api_name": "mmdet.__version__", "line_number": 224, "usage_type": "name"}, {"api_name": "mmdet.datasets.build_dataloader", "line_number": 231, "usage_type": "call"}, {"api_name": "mmcv.parallel.MMDataParallel", "line_number": 239, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 276, "usage_type": "call"}, {"api_name": "IPython.embed", "line_number": 277, "usage_type": "call"}, {"api_name": "mmcv.mkdir_or_exist", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path", "line_number": 303, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 304, "usage_type": "call"}]} +{"seq_id": "50933839", "text": "import sys\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nfrom PyQt5.QtWidgets import (QWidget, QLCDNumber, QSlider, QVBoxLayout, QApplication)\nimport sys\nimport glob\nimport serial\nimport time\n\n\nclass SomeThread(QThread):\n progressed = pyqtSignal(int)\n\n def __init__(self):\n super().__init__()\n\n def run(self):\n for i in range(1, 11):\n self.progressed.emit(i)\n time.sleep(0.5)\n\n\nclass Sliderdemo(QWidget):\n def __init__(self, vSl=32, parent=None):\n super(Sliderdemo, self).__init__(parent)\n lcd = QLCDNumber(self)\n lcd.display(32)\n vbox = QVBoxLayout()\n sld = QSlider(Qt.Horizontal, self)\n sld.setMinimum(32)\n sld.setTickInterval(1)\n sld.setMaximum(255)\n sld.setValue(vSl)\n sld.setTickPosition(QSlider.TicksBelow)\n sld.setTickInterval(10)\n vbox.addWidget(lcd)\n 
vbox.addWidget(sld)\n        sld.valueChanged[int].connect(self.valuechange)\n        self.setLayout(vbox)\n        sld.valueChanged.connect(lcd.display)\n        self.setWindowTitle(\"slider\")\n        # print(self.valuechange())\n\n        self.speed = 32\n        self.thread = None\n\n        if not self.thread:\n            self.thread = SomeThread()\n            self.thread.progressed.connect(self.on_progress)\n            self.thread.finished.connect(self.on_finished)\n            self.thread.start()\n\n    def on_progress(self, value):\n        # slot receiving progress updates emitted by SomeThread\n        print(\"progress ->\", value)\n\n    # call this when the program exits or when a button is pressed\n    def on_finished(self):\n        self.thread.progressed.disconnect(self.on_progress)\n        self.thread.finished.disconnect(self.on_finished)\n        self.thread = None\n\n    def valuechange(self, value):\n        self.speed = value\n        print(\"__init__vSl -> \", self.speed)\n        # return self.size\n\n    def serial_ports(self):\n        if sys.platform.startswith('win'):\n            ports = ['COM%s' % (i + 1) for i in range(256)]\n        elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n            # this excludes your current terminal \"/dev/tty\"\n            ports = glob.glob('/dev/tty[A-Za-z]*')\n        elif sys.platform.startswith('darwin'):\n            ports = glob.glob('/dev/tty.*')\n        else:\n            raise EnvironmentError('Unsupported platform')\n\n        result = []\n        for port in ports:\n            try:\n                s = serial.Serial(port)\n                s.close()\n                result.append(port)\n            except (OSError, serial.SerialException):\n                pass\n        return result\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Sliderdemo(32)\n    ex.show()\n    sys.exit(app.exec_())\n", "sub_path": "Архив/Мусор/serials.py", "file_name": "serials.py", "file_ext": "py", "file_size_in_byte": 2699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "PyQt5.QtCore.QThread", "line_number": 10, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLCDNumber", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Horizontal", "line_number": 28, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider.TicksBelow", "line_number": 33, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 33, "usage_type": "name"}, {"api_name": "sys.platform.startswith", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.platform.startswith", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 66, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.platform.startswith", "line_number": 69, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 69, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 70, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 77, "usage_type": "call"}, {"api_name": "serial.SerialException", "line_number": 80, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 86, "usage_type": "attribute"}, {"api_name": "sys.exit", 
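The Sliderdemo widget above scans serial ports but never opens one. A hedged sketch of how the scan result could feed a write of the slider's speed value, assuming pyserial and a hypothetical device protocol that expects one byte in the 32..255 range at 9600 baud:

import serial  # pyserial


def send_speed(port_name, speed, baud=9600):
    # speed mirrors Sliderdemo.speed: clamp to the slider's 32..255 range.
    speed = max(32, min(255, int(speed)))
    with serial.Serial(port_name, baudrate=baud, timeout=1) as s:
        s.write(bytes([speed]))  # single byte, matching the value range

# Usage, assuming serial_ports() found at least one device:
# send_speed('COM3', 128)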
"line_number": 89, "usage_type": "call"}]} +{"seq_id": "233475929", "text": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\nhttps://github.com/ReactiveX/RxPY\n\n异步编程又一方式\n\"\"\"\n\nimport rx\nfrom rx import operators as ops\n\nsource = rx.of(\"Alpha\", \"Beta\", \"Gamma\", \"Delta\", \"Epsilon\")\n\ncomposed = source.pipe(\n ops.map(lambda s: len(s)),\n ops.filter(lambda i: i >= 5)\n)\n\ncomposed.subscribe(lambda value: print(\"Received {0}\".format(value)))\n", "sub_path": "language/python/modules/Other/rx/rx_module.py", "file_name": "rx_module.py", "file_ext": "py", "file_size_in_byte": 404, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "rx.of", "line_number": 15, "usage_type": "call"}, {"api_name": "rx.operators.map", "line_number": 18, "usage_type": "call"}, {"api_name": "rx.operators", "line_number": 18, "usage_type": "name"}, {"api_name": "rx.operators.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "rx.operators", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "56881537", "text": "import asyncio\nimport threading\nfrom typing import Optional\n\nimport gevent\n\n\ndef yield_future(\n future: asyncio.Future, loop: Optional[asyncio.AbstractEventLoop] = None\n) -> gevent.Greenlet:\n \"\"\"Wait for a future, a task, or a coroutine object from a greenlet.\n\n Yield control other eligible greenlet until the future is done (finished\n successfully or failed with an exception).\n\n Return the result or raise the exception of the future.\n\n The function must not be called from the greenlet running the aiogreen\n event loop.\n \"\"\"\n loop = loop or asyncio.get_event_loop()\n\n future = asyncio.ensure_future(future, loop=loop)\n\n if future._loop._greenlet == gevent.getcurrent():\n raise RuntimeError(\n \"yield_future() must not be called from \"\n \"the greenlet of the aiogreen event loop\"\n )\n\n event = gevent.event.Event()\n\n def wakeup_event(fut):\n event.set()\n\n future.add_done_callback(wakeup_event)\n event.wait()\n\n return future.result()\n", "sub_path": "asyncio_gevent/yield_future.py", "file_name": "yield_future.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "asyncio.Future", "line_number": 9, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 9, "usage_type": "name"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 9, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 21, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 23, "usage_type": "call"}, {"api_name": "gevent.getcurrent", "line_number": 25, "usage_type": "call"}, {"api_name": "gevent.event.Event", "line_number": 31, "usage_type": "call"}, {"api_name": "gevent.event", "line_number": 31, "usage_type": "attribute"}, {"api_name": "gevent.Greenlet", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "170504898", "text": "import socket\nimport epdb\nfrom twisted.internet.protocol import Factory\nfrom twisted.internet.protocol import Protocol\nfrom twisted.internet.endpoints import TCP4ServerEndpoint\nfrom twisted.internet import reactor\n\nMSGLEN=1024\nHERTZ=100\n\nclass 
ccolors:\n\tOKGREEN='\\033[92m'\n\tOKGRAY='\\033[90m'\n\tOKPINK='\\033[91m'\n\tOKYELLOW='\\033[93m'\n\tOKBLUE='\\033[94m'\n\tOKPURPLE='\\033[95m'\n\tOKAQUA='\\033[96m'\n\tOKWHITE='\\033[97m'\n\tOKNEXT='\\033[92m'\n\tENDC='\\033[0m'\n\n\nclass rproc(Protocol):\n\tdef connectionMade(self):\n\t\tself.transport.write('mesg recvd!')\n\t\tself.transport.loseConnection()\n\nclass rprocSrv(Factory):\n\tdef buildProtocol(self,addr):\n\t\treturn rproc()\n\n\n\n\n\n\n\n\nclass clsProcs:\n\tpid=0\n\tcmdline=''\n\tcpu=0\n\tmemory=0\n\tstatline=''\n\tcurState=''\n\tprio=''\n\taddress=''\n\tstatuses=[]\n\tkerneljiffies=0\n\tuserjiffies=0\n\tuptime=0\n\tdef __init__(self,pid=0,statuses='',statline='',prio='',address='',uptime=''):\n\t\tstats=statline.strip().split()\n\t\tself.pid=pid\n\t\tself.cmdline=statuses[1]\n\t\tself.statuses=statuses\n\t\tself.statline=statline\n\t\tself.address=address\n\t\tself.uptime=uptime\n\t\tself.cutime=stats[15]\n\t\tself.cktime=stats[16]\n\t\tself.cputime=(float(self.cutime)+float(self.cktime)+float(self.userjiffies)+float(self.kerneljiffies))/100\n\t\ttry:\n\t\t\ttotaltime=0\n\t\t\ttotaltime=int(stats[14])+int(stats[13])\n\t\t\tsecsTot=float(self.uptime)-(float(stats[21])/100)\n\t\t\tself.cpu=100*((float(totaltime)/HERTZ)/float(secsTot))\n\t\texcept IndexError:\n\t\t\tpass\n\t\ttry:\n\t\t\tself.curState=stats[2]\n\t\texcept IndexError:\n\t\t\tpass\n\t\ttry:\n\t\t\tself.prio=stats[39]\n\t\texcept IndexError:\n\t\t\tpass\n\t\ttry:\n\t\t\tself.kerneljiffies=stats[14]\n\t\texcept IndexError:\n\t\t\tpass\n\t\ttry:\n\t\t\tself.userjiffies=stats[13]\n\t\texcept IndexError:\n\t\t\tpass\n\t\treturn\nclass mySock:\n\tdef __init__(self,sock=None):\n\t\tif sock is None:\n\t\t\tself.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t\telse:\n\t\t\tself.sock=sock\n\n\tdef connect(self,host,port):\n\t\tself.sock.connect((host,port))\n\tdef mybind(self,host,port):\n\t\tself.sock.bind((host,port))\n\tdef mylisten(self,maxconns):\n\t\tself.sock.listen(maxconns)\n\n\tdef mysend(self,msg):\n\t\ttotalsent=0\n\t\twhile totalsent 0:\n print(\"Retreived \" + str(mall_json))\n df = concat([df, json_normalize(mall_json)])\n else:\n print(mall + \" not found!\")\n\n df.to_csv(\"malls.csv\")\n\n", "sub_path": "download/download_malls.py", "file_name": "download_malls.py", "file_ext": "py", "file_size_in_byte": 4496, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.DataFrame", "line_number": 179, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.json_normalize", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "539937588", "text": "import re\nimport urllib.request,urllib.error\nimport bs4\n\nbaseUrl = \"https://www.luogu.com.cn/problem/P\"\nsavePath = \"C:\\\\Users\\\\alant\\\\Desktop\\\\洛谷题目爬取\\\\\"\n\n# minn = 1316\n# maxn = 2000 #最大题号\n#\n# def main():\n# print(\"计划爬取到P{}\".format(maxn))\n# for i in range(minn,maxn+1):\n# print(\"正在爬取P{}...\".format(i),end=\"\")\n# html = getHTML(baseUrl + str(i))\n# if html == \"error\":\n# print(\"爬取失败,可能是不存在该题或无权查看\")\n# else:\n# problemMD = getMD(html)\n# print(\"爬取成功!正在保存...\",end=\"\")\n# saveData(problemMD,\"P\"+str(i)+\".md\")\n# print(\"保存成功!\")\n# print(\"爬取完毕\")\n\ndef main():\n pNum = input(\"请输入题目编号:\")\n html = getHTML(baseUrl+pNum)\n if html == \"error\":\n print(\"爬取失败,可能是不存在该题或无权查看\")\n else:\n problemMD = 
getMD(html)\n print(\"爬取成功!正在保存...\", end=\"\")\n saveData(problemMD, \"README\" + \".md\")\n filePath=savePath+\"P\"+pNum+\".cpp\";\n file =open(filePath,\"x\");#创建cpp文件并写入头文件和创建main函数\n file.write(\"#include \\n\\nusing namespace std;\\n\\nint main(){\\n\\n\\treturn 0;\\n}\")\n print(\"保存成功!\")\n\n\n\n\ndef getHTML(url):\n headers = {\n \"user-agent\": \"Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 85.0.4183.121 Safari / 537.36\"\n }\n request = urllib.request.Request(url = url,headers = headers)\n response = urllib.request.urlopen(request)\n html = response.read().decode('utf-8')\n if str(html).find(\"Exception\") == -1:#洛谷中没找到该题目或无权查看的提示网页中会有该字样\n return html\n else:\n return \"error\"\n\ndef getMD(html):\n bs = bs4.BeautifulSoup(html,\"html.parser\")\n core = bs.select(\"article\")[0]\n md = str(core)\n md = re.sub(\"
<h1>\",\"# \",md)\n md = re.sub(\"<h2>\",\"## \",md)\n md = re.sub(\"<h3>
\",\"#### \",md)\n md = re.sub(\"]*>\",\"\",md)\n return md\n\ndef saveData(data,filename):\n cfilename = savePath + filename\n file = open(cfilename,\"w\",encoding=\"utf-8\")\n for d in data:\n file.writelines(d)\n file.close()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "洛谷题目爬取/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "urllib.request.request.Request", "line_number": 46, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 46, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 46, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 47, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 47, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 55, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 58, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 59, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 60, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "593312382", "text": "from flask import Flask, render_template,session,redirect,url_for,flash\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import (StringField,BooleanField,\r\n RadioField,SelectField,\r\n TextAreaField,SubmitField)\r\n\r\n\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = 'my_key'\r\n\r\nclass SimpleForm(FlaskForm):\r\n breed = StringField('What breed are you?')\r\n submit = SubmitField('Click me')\r\n\r\n@app.route('/',methods = ['GET','POST'])\r\ndef index():\r\n\r\n form = SimpleForm()\r\n\r\n if form.validate_on_submit():\r\n session['breed'] = form.breed.data\r\n flash(f\"you just changes your breede to: {session['breed']})\")\r\n flash(\"hello\")\r\n\r\n return redirect(url_for('index'))\r\n\r\n return render_template('flashalerts.html', form = form)\r\n\r\nif __name__=='__main__':\r\n app.run(debug=True)\r\n", "sub_path": "PYTHON/FLASK/FORMS/flashlerts.py", "file_name": "flashlerts.py", "file_ext": "py", "file_size_in_byte": 858, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 13, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 14, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "525664649", "text": "import os\nimport sys\nimport time\nimport json\n\nfrom selenium import webdriver\n\n# local config.py\nimport config\nfrom game import *\n\n\ndef get_fitness(game):\n return (1 / game.end_distance) + (game.collected_coins / 135) + (game.level * 2)\n\n\ndef main():\n if 
config.driver_path and not os.path.exists(config.driver_path):\n print(\"The driver path in the config doesn't exist\")\n print(\n \"You can download the chromium drivers from their official\"\n \"website:\"\n \"\\n\\t- Access http://chromedriver.chromium.org/downloads and \"\n \"download the drivers.\"\n \"\\n\\t- Place the drivers in the drivers folder or in other place\"\n \"\\n\\t- If you are on linux or macOS you might need to give \"\n \"\\n\\t permission to that file, ex: sudo chmod +x chromedriver\"\n \"\\n\\t- Edit the 'driver_path' in the config.py file with the path\"\n \"of the drivers you downloaded\"\n )\n sys.exit(1)\n\n # Create driver with all the arguments\n options = webdriver.ChromeOptions()\n # to leave browser open\n options.add_experimental_option(\"detach\", True)\n options.add_argument(\"--log-level=%d\" % int(config.driver_log_level))\n options.add_argument(\"--window-size=900,800\")\n if config.driver_headless:\n options.add_argument(\"headless\")\n\n driver = webdriver.Chrome(\n config.driver_path, service_log_path=\"driver.log\", options=options\n )\n\n real_url = \"file://\" + os.path.join(os.getcwd(), config.base_url)\n driver.implicitly_wait(10)\n\n print(real_url)\n # Go to the url\n driver.get(real_url)\n\n data = GameData(driver)\n do = GameInput(driver)\n\n print(type(data.get_data()))\n print(data.get_data())\n print(get_fitness(data))\n\n do.up()\n do.left()\n do.down()\n do.right()\n do.up()\n do.left()\n do.down()\n do.right()\n\n driver.implicitly_wait(10)\n\n time.sleep(1)\n\n driver.quit()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"Interrupted by the user.\")\n sys.exit()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "game.end_distance", "line_number": 14, "usage_type": "attribute"}, {"api_name": "game.collected_coins", "line_number": 14, "usage_type": "attribute"}, {"api_name": "game.level", "line_number": 14, "usage_type": "attribute"}, {"api_name": "config.driver_path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 34, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 34, "usage_type": "name"}, {"api_name": "config.driver_log_level", "line_number": 37, "usage_type": "attribute"}, {"api_name": "config.driver_headless", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 42, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 42, "usage_type": "name"}, {"api_name": "config.driver_path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 46, "usage_type": "call"}, {"api_name": "config.base_url", "line_number": 46, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "3579050", "text": "from django.conf.urls import url\nfrom destination_dog import views\n\nurlpatterns = 
[\n url(r'^$', views.home, name='home'),\n url(r'articles/$', views.article_list, name='article_list'),\n url(r'^articles/add_article/', views.add_article, name='add_article'),\n url(r'^articles/(?P[\\w\\-]+)/', views.show_article, name='show_article'),\n url(r'^dogofthemonth/$', views.dotm, name='dotm'),\n url(r'^dogofthemonth/vote/', views.dotm_vote, name='dotm_vote'),\n url(r'^dogofthemonth/enter/', views.dotm_enter, name='dotm_enter'),\n url(r'^dogofthemonth/hall_of_fame/', views.dotm_hall_of_fame, name='dotm_hall_of_fame'),\n url(r'^locateservice/$', views.locateServices, name='locateservice'),\n url(r'^locateservice/add_service/$', views.add_service, name='add_service'),\n url(r'^locateservice/(?P[\\w\\-]+)/', views.show_service, name='show_service'),\n url(r'^forum', views.forum, name='forum'),\n url(r'^events/$', views.events, name='events'),\n url(r'^events/add_event/$', views.add_events, name='add_events'),\n url(r'^events/(?P[\\w\\-]+)/', views.show_event, name='show_event'),\n url(r'^about', views.about, name='about'),\n url(r'^contactus', views.contactus, name='contactus'),\n url(r'^sitemap', views.sitemap, name='sitemap'),\n url(r'^login/$', views.user_login, name='login'),\n url(r'^logout/$', views.user_logout, name='logout'),\n url(r'^register/$', views.register, name='register'),\n url(r'^like/$', views.vote_dotm, name='vote_dotm'),\n url(r'^profiles/$', views.list_profiles, name='list_profiles'),\n url(r'^profile/(?P[\\w\\-]+)/$', views.profile, name='user_profile'),\n url(r'^profile/(?P[\\w\\-]+)/add_dog/$', views.add_dog, name='add_dog'),\n url(r'^dogprofile/(?P[\\w\\-]+)/', views.dogprofile, name='dogprofile'),\n url(r'^deactivate/$', views.deactivate_profile, name=\"deactivate\")\n\n]\n", "sub_path": "destination_dog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "destination_dog.views.home", "line_number": 5, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "destination_dog.views.article_list", "line_number": 6, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "destination_dog.views.add_article", "line_number": 7, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "destination_dog.views.show_article", "line_number": 8, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "destination_dog.views.dotm", "line_number": 9, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "destination_dog.views.dotm_vote", "line_number": 10, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, 
{"api_name": "destination_dog.views.dotm_enter", "line_number": 11, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "destination_dog.views.dotm_hall_of_fame", "line_number": 12, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "destination_dog.views.locateServices", "line_number": 13, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "destination_dog.views.add_service", "line_number": 14, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "destination_dog.views.show_service", "line_number": 15, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "destination_dog.views.forum", "line_number": 16, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "destination_dog.views.events", "line_number": 17, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "destination_dog.views.add_events", "line_number": 18, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "destination_dog.views.show_event", "line_number": 19, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "destination_dog.views.about", "line_number": 20, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "destination_dog.views.contactus", "line_number": 21, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "destination_dog.views.sitemap", "line_number": 22, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "destination_dog.views.user_login", "line_number": 23, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "destination_dog.views.user_logout", "line_number": 24, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, 
"usage_type": "call"}, {"api_name": "destination_dog.views.register", "line_number": 25, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "destination_dog.views.vote_dotm", "line_number": 26, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "destination_dog.views.list_profiles", "line_number": 27, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "destination_dog.views.profile", "line_number": 28, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "destination_dog.views.add_dog", "line_number": 29, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "destination_dog.views.dogprofile", "line_number": 30, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "destination_dog.views.deactivate_profile", "line_number": 31, "usage_type": "attribute"}, {"api_name": "destination_dog.views", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "449893127", "text": "#!/usr/bin/python\n\nimport subprocess\nfrom dialog import ReloadingDialog\n\nclass File:\n\n def __init__(self, bar=None):\n self.bar = bar\n self.log_file = \"/var/log/messages\"\n self.fdialog = ReloadingDialog()\n self.array = []\n self.secarray = []\n self.pid = []\n\n def read(self):\n progress = 0\n \n self.fdialog.show_all()\n with open(self.log_file) as f:\n size = sum(1 for _ in f)\n f.close()\n\n fobj = open(self.log_file)\n for line in fobj:\n row = self.parse_line(line.rstrip())\n if not any(row[0] in s for s in self.secarray):\n self.secarray.append(row[0])\n if not any(row[1] in s for s in self.pid):\n self.pid.append(row[1])\n self.array.append(row)\n progress += 1\n self.fdialog.read_line(progress, size)\n fobj.close()\n\n self.fdialog.hide_all()\n\n\n def parse_line(self, line):\n array = []\n array_sw = []\n arrayinfo = line.split( );\n arraymessage = line.split(arrayinfo[4])\n \n array_sw.append(arrayinfo[0])\n array_sw.append(arrayinfo[1])\n array_sw.append(arrayinfo[2])\n array_sw.append(arrayinfo[3])\n array_sw.append(arraymessage[-1][1:])\n array.append(arrayinfo[4][:-1].split(\"[\")[0])\n if (len(arrayinfo[4][:-1].split(\"[\")) < 2):\n array.append(\"None\")\n else:\n array.append(arrayinfo[4][:-1].split(\"[\")[1][:-1])\n array.append(array_sw)\n\n return array\n", "sub_path": "src/log.py", "file_name": "log.py", "file_ext": "py", "file_size_in_byte": 1923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "dialog.ReloadingDialog", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "402075211", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 16 20:00:18 2019\r\n\r\n@author: Evan\r\n\"\"\"\r\n\r\nimport time\r\nimport matplotlib.pyplot as 
plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport calendar\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import preprocessing\r\nfrom sklearn.metrics import mean_squared_error\r\nimport csv\r\nfrom datetime import date\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optimizerim\r\nimport torch.nn.functional as F\r\nfrom torch.utils import data\r\nfrom torch.optim import lr_scheduler\r\nimport os\r\nimport copy\r\n\r\ntest_path = '/home/evan/PP_TestData.csv'\r\nencode_path = '/home/evan/Encoding/'\r\nprint(\"Script Running!\")\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nprint(\"Using Device: \", device )\r\nheaders = [\"Model Name\", \"Epoch\",\"Train RMSE\", \"Train Loss\", \"Validation RMSE\", \"Validation Loss\"]\r\ncalendar_dict = {month: i for i,month in enumerate(calendar.month_abbr)}\r\ndata_collection_date = date(2019,2,1)\r\n\r\nmax_price = 200000\r\nmax_mileage = 400000\r\ntorch.manual_seed(0)\r\n\r\nnumerical_labels = ['engine', 'year', 'NCT',\r\n 'previousOwners', 'mileage']\r\n\r\nlabels = ['price', 'make', 'model', 'year', 'mileage', 'fuelType', 'transmission',\r\n 'engine', 'previousOwners', 'colour', \r\n 'county','sellerType','newCar' ]\r\n\r\ncategorical_features = list(set(labels) - set(numerical_labels))\r\ncategorical_features.remove('price')\r\n\r\n\r\n# %% Functions\r\ndef calculate_NCT(data):\r\n data['NCT'].fillna(value=0, inplace=True)\r\n #data['roadTax'].fillna(value=0, inplace=True)\r\n\r\n for i, row in data.iterrows(): \r\n if row.NCT !=0:\r\n NCT_date = row.NCT\r\n if NCT_date[:3] in calendar_dict:\r\n month = calendar_dict[NCT_date[:3]]\r\n year = int(float(NCT_date[4:])) \r\n NCT_date = date(year, month, 1)\r\n difference = NCT_date - data_collection_date\r\n if difference.days < 0:\r\n data.at[i,'NCT'] = 0 \r\n else:\r\n data.at[i,'NCT'] = difference.days \r\n else:\r\n data.at[i,'NCT'] = 0\r\n \r\n return data\r\n\r\n\r\ndef prepare_data(data): \r\n \r\n print(\"-\"*20) \r\n print(\"Number of Rows:\\t\\t\", len(data))\r\n print(\"Number of Features:\\t\", len(data.columns))\r\n print(\"-\"*20) \r\n print(\"-\"*20) \r\n missing_values = data.isnull().sum()\r\n missing_values = missing_values[missing_values>0].sort_values(ascending = False)\r\n print(\"Missing Values:\")\r\n print(missing_values.head(len(missing_values)))\r\n # Find days of NCT remaining\r\n data = calculate_NCT(data)\r\n\r\n\r\n # Filter outliers\r\n data.drop(data[data['price'] > max_price].index,inplace = True)\r\n data.drop(data[data['mileage'] > max_mileage].index,inplace = True)\r\n # Drop Cars from Northern Ireland as VRT not included\r\n data.drop(data[data['currency'] == 'GBP'].index,inplace = True)\r\n \r\n print(\"-\"*20) \r\n missing_values = data.isnull().sum()\r\n missing_values = missing_values[missing_values>0].sort_values(ascending = False)\r\n print(\"Missing Values:\")\r\n print(missing_values.head(len(missing_values)))\r\n \r\n data.drop(['id' , 'description','maxSpeed', 'mpgCombined', 'cylinders', 'noughtToSixty', 'tankCapacity',\r\n 'vehicleHeight', 'vehicleLength', 'vehicleWidth', 'numDoors','roadTax', 'currency', 'country'\r\n , 'greenlightVerified', 'bodyType'], axis=1, inplace=True) \r\n # Check missing values\r\n print(\"-\"*20) \r\n missing_values = data.isnull().sum()\r\n missing_values = missing_values[missing_values>0].sort_values(ascending = False)\r\n print(\"Missing Values:\")\r\n print(missing_values.head(len(missing_values)))\r\n\r\n # Deal with NAN values and 
convert strings to numeric values\r\n data = data.dropna(subset = ['model', 'price' , 'year'])\r\n for col in data.columns:\r\n if col in numerical_labels + ['price']:\r\n data[col].fillna(value=0, inplace=True)\r\n pd.to_numeric(data[col])\r\n else:\r\n data[col].fillna(value='not-declared', inplace=True)\r\n \r\n \r\n output = np.log(data.price)\r\n data.drop(['price'], axis=1, inplace=True)\r\n\r\n \r\n # Check missing Values\r\n print(\"-\"*20) \r\n missing_values = data.isnull().sum()\r\n missing_values = missing_values[missing_values>0].sort_values(ascending = False)\r\n print(\"Missing Values After Filtering:\\t\", len(missing_values))\r\n print(\"Number of Rows:\\t\\t\", len(data))\r\n print(\"Number of Features:\\t\", len(data.columns))\r\n \r\n # Encode categorical data\r\n print(\"-\"*20) \r\n print(\"Encoding Categorical Data...\")\r\n #label = LabelEncoder()\r\n for l in data.columns:\r\n if l not in numerical_labels:\r\n print(\"Encoding \" , l ,\"...\")\r\n en_map = dict(zip(data[l].unique(), range(0,len(data[l].unique()))))\r\n df = pd.DataFrame.from_dict(en_map ,orient = \"index\")\r\n df.to_csv(encode_path+l+\".csv\", encoding='utf-8' )\r\n data[l] = data[l].map(en_map)\r\n \r\n print(\"Data Encoded!\")\r\n print(\"-\"*20) \r\n\r\n return data, output\r\n\r\n\r\ndef split_dataset(data, output, split): \r\n return train_test_split(data, output, test_size=split, random_state=42)\r\n\r\ndef inv_y(y): \r\n return np.exp(y)\r\n\r\ndef get_numf_scaler(train): return preprocessing.StandardScaler().fit(train)\r\n\r\ndef scale_numf(df, num, scaler):\r\n cols = numerical_labels\r\n index = df.index\r\n scaled = scaler.transform(df[numerical_labels])\r\n scaled = pd.DataFrame(scaled, columns=cols, index=index)\r\n return pd.concat([scaled, df.drop(numerical_labels, axis=1)], axis=1)\r\n\r\nclass RegressionColumnarDataset(data.Dataset):\r\n def __init__(self, df, cats, y):\r\n self.dfcats = df[cats]\r\n self.dfconts = df.drop(cats, axis=1)\r\n \r\n self.cats = np.stack([c.values for n, c in self.dfcats.items()], axis=1).astype(np.int64)\r\n self.conts = np.stack([c.values for n, c in self.dfconts.items()], axis=1).astype(np.float32)\r\n self.y = y.values.astype(np.float32)\r\n \r\n def __len__(self):\r\n return len(self.y)\r\n\r\n def __getitem__(self, idx):\r\n return [self.cats[idx], self.conts[idx], self.y[idx]]\r\n \r\ndef rmse(targ, y_pred):\r\n return np.sqrt(mean_squared_error(inv_y(y_pred), inv_y(targ))) #.detach().numpy()\r\n\r\ndef emb_init(x):\r\n x = x.weight.data\r\n sc = 2/(x.size(1)+1)\r\n x.uniform_(-sc,sc)\r\n\r\nclass MixedInputModel(nn.Module):\r\n def __init__(self, emb_sizes, n_cont, emb_drop, out_size, sizes, drops, y_range= None, use_bn=True):\r\n super().__init__()\r\n for i,(c,s) in enumerate(emb_sizes): assert c > 1, f\"cardinality must be >=2, got emb_sizes[{i}]: ({c},{s})\"\r\n self.embs = nn.ModuleList([nn.Embedding(c, s) for c,s in emb_sizes])\r\n for emb in self.embs: emb_init(emb)\r\n n_emb = sum(e.embedding_dim for e in self.embs)\r\n self.n_emb, self.n_cont=n_emb, n_cont\r\n \r\n sizes = [n_emb+n_cont] + sizes\r\n self.lins = nn.ModuleList([nn.Linear(sizes[i], sizes[i+1]) for i in range(len(sizes)-1)])\r\n self.bns = nn.ModuleList([nn.BatchNorm1d(size) for size in sizes[1:]])\r\n for a in self.bns:\r\n a.track_running_stats = False\r\n for o in self.lins: nn.init.kaiming_normal_(o.weight.data)\r\n self.outp = nn.Linear(sizes[-1], out_size)\r\n nn.init.kaiming_normal_(self.outp.weight.data)\r\n\r\n self.emb_drop = nn.Dropout(emb_drop)\r\n self.drops = 
nn.ModuleList([nn.Dropout(drop) for drop in drops])\r\n self.bn = nn.BatchNorm1d(n_cont)\r\n if y_range:\r\n self.y_range = y_range\r\n self.use_bn= use_bn\r\n\r\n def forward(self, x_cat, x_cont):\r\n if self.n_emb != 0:\r\n x = [e(x_cat[:,i]) for i,e in enumerate(self.embs)]\r\n x = torch.cat(x, 1)\r\n x = self.emb_drop(x)\r\n if self.n_cont != 0:\r\n x2 = self.bn(x_cont)\r\n x = torch.cat([x, x2], 1) if self.n_emb != 0 else x2\r\n for l,d,b in zip(self.lins, self.drops, self.bns):\r\n x = F.relu(l(x))\r\n if self.use_bn: x = b(x)\r\n x = d(x)\r\n x = self.outp(x)\r\n if self.y_range:\r\n x = torch.sigmoid(x)\r\n x = x*(self.y_range[1] - self.y_range[0])\r\n x = x+self.y_range[0]\r\n return x.squeeze()\r\n\r\ndef train(model, train_dl, val_dl, loss_fn, optimizer, scheduler, num_epochs):\r\n since = time.time()\r\n #Initially set lowest rmse and loss very large\r\n lowest_val_rmse = 999999\r\n lowest_val_loss = 999999\r\n\r\n print(\"Begining Training for Epochs: \", num_epochs)\r\n for epoch in range(1,num_epochs+1): \r\n print('-' * 20)\r\n print('Epoch ', epoch)\r\n print((((epoch-1)/num_epochs))*100, \"% Completed\")\r\n\r\n y_true_train = list()\r\n y_pred_train = list()\r\n total_loss_train = 0 \r\n row = [model_name, epoch]\r\n model = model.train()\r\n\r\n\r\n# Training Section\r\n for cat, cont, y in train_dl:\r\n \r\n cat = cat.to(device)\r\n cont = cont.to(device)\r\n y = y.to(device)\r\n \r\n optimizer.zero_grad()\r\n pred = model(cat, cont)\r\n loss = loss_fn(pred, y)\r\n loss.backward()\r\n\r\n scheduler.step()\r\n optimizer.step()\r\n \r\n y_true_train += list(y.cpu().data.numpy())\r\n y_pred_train += list(pred.cpu().data.numpy())\r\n total_loss_train += loss.item()\r\n \r\n train_rmse = rmse(y_true_train, y_pred_train)\r\n train_loss = total_loss_train/len(train_dl)\r\n print('Training RMSE:\\t {:.4f}'.format(train_rmse))\r\n print('Training loss:\\t {:.4f}'.format(train_loss))\r\n row.append(train_rmse)\r\n row.append(train_loss)\r\n \r\n# Validation Section\r\n y_true_val = list()\r\n y_pred_val = list()\r\n total_loss_val = 0\r\n optimizer.zero_grad()\r\n model = model.eval()\r\n for cat, cont, y in val_dl:\r\n\r\n cat = cat.to(device)\r\n cont = cont.to(device)\r\n y = y.to(device)\r\n pred = model(cat, cont)\r\n loss = loss_fn(pred, y)\r\n\r\n y_true_val += list(y.cpu().data.numpy())\r\n y_pred_val += list(pred.cpu().data.numpy())\r\n total_loss_val += loss.item()\r\n val_rmse = rmse(y_true_val, y_pred_val)\r\n val_loss = total_loss_val/len(valdl)\r\n if val_rmse < lowest_val_rmse:\r\n lowest_val_rmse = val_rmse\r\n if val_loss < lowest_val_loss:\r\n lowest_val_loss = val_loss\r\n model_best = copy.deepcopy(model.state_dict())\r\n\r\n print('Validation RMSE: {:.4f}'.format(val_rmse))\r\n print('Validation loss: {:.4f}'.format(val_loss))\r\n row.append(val_rmse)\r\n row.append(val_loss)\r\n writer.writerow(row) \r\n\r\n print('-' * 20)\r\n time_elapsed = time.time() - since\r\n print('Training complete in {:.0f}m {:.0f}s'.format(\r\n time_elapsed // 60, time_elapsed % 60))\r\n print(\"Lowest Validation Loss: \", lowest_val_loss) \r\n print(\"Lowest Validation RMSE: \", lowest_val_rmse) \r\n print('-' * 20)\r\n model.load_state_dict(model_best)\r\n\r\n return model\r\n\r\n\r\n\r\n# %% Sort and filter dataCSV\r\n\r\n#os.chdir('C:/Users/Evan/Documents/College/4th Year/FYP/price_prediction')\r\n#dataCSV = pd.read_csv('DB_DoneDeal_TEST.csv')\r\ndataCSV = pd.read_csv('/data/evan/donedeal_DB/DB_DoneDeal.csv')\r\n\r\n\r\n##################\r\nprint(\"-\"*20) 
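\r\n# sanity note: split_dataset below is a thin wrapper around train_test_split and\r\n# is applied twice, so the effective proportions are roughly train 56% / val 24% /\r\n# test 20% of the cleaned rows (0.8*0.7, 0.8*0.3 and 0.2 respectively).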
\r\nprint(\"Processing and Splitting Data....\")\r\n\r\n\r\n\r\n\r\n\r\ndata_full, y = prepare_data(dataCSV)\r\n\r\nX, X_test, y, y_test = split_dataset(data_full, y , 0.2)\r\nX_train, X_val, y_train, y_val = split_dataset(X, y , 0.3)\r\n#data_train_val, test = train_test_split(dataCSV, test_size=0.2, random_state=42)\r\n\r\n\r\ntest = pd.concat([X_test, y_test], axis=1)\r\ntest.to_csv(test_path, encoding='utf-8')\r\n\r\n\r\nfor l in X_train.columns:\r\n print(l)\r\n print(\"Train:\\t\" ,X_train[l].nunique() )\r\n print(\"Val:\\t\" ,X_val[l].nunique() )\r\n print(\"Test:\\t\" ,X_test[l].nunique() )\r\n\r\n\r\nprint(\"Dataset Size:\\t\", len(data_full), \"\\tFeatures: \",len(data_full.columns) )\r\nprint(\"Training Size:\\t\", len(X_train), \"\\tFeatures: \",len(X_train.columns) )\r\nprint(\"Val Size:\\t\", len(X_val), \"\\tFeatures: \",len(X_val.columns) )\r\nprint(\"Test Size:\\t\", len(test), \"\\tFeatures: \",len(test.columns) )\r\n\r\n\r\nprint(\"Data Processed!!\")\r\nprint(\"-\"*20) \r\n##################\r\n\r\nscaler = get_numf_scaler(X_train[numerical_labels])\r\nX_train_sc = scale_numf(X_train, numerical_labels, scaler)\r\n\r\nX_val_sc = scale_numf(X_val, numerical_labels, scaler)\r\nX_test_sc = scale_numf(X_test, numerical_labels, scaler)\r\n\r\ntrainds = RegressionColumnarDataset(X_train_sc, categorical_features, y_train)\r\nvalds = RegressionColumnarDataset(X_val_sc, categorical_features, y_val)\r\ntestds = RegressionColumnarDataset(X_test_sc, categorical_features, y_test)\r\n\r\n\r\nparams = {'batch_size': 128,\r\n 'shuffle': True,\r\n 'num_workers': 8}\r\n\r\ntraindl = data.DataLoader(trainds, **params)\r\nvaldl = data.DataLoader(valds, **params)\r\ntestdl = data.DataLoader(valds, **params)\r\n\r\n\r\ny_range = (0, y_train.max()*1.2)\r\nprint(\"Y-Range:\\t\", y_range)\r\n\r\ncat_size = [(c, data_full[c].max()+1) for c in categorical_features]\r\nprint(\"cat_size:\\t\", cat_size)\r\nemb_sizes = [(c, min(50, (c+1)//2)) for _,c in cat_size]\r\nprint(\"emb_sizes=\\t\", emb_sizes)\r\n\r\n# %% Setup Model\r\nmodel_name = 'FinalPricePred2'\r\n\r\nresult_path = model_name + '.csv'\r\n \r\ncsvFile =open(result_path, 'a', newline='' ,encoding=\"utf-8\" )\r\nwriter = csv.writer(csvFile)\r\nif os.stat(result_path).st_size == 0:\r\n writer.writerow(headers) \r\n\r\n\r\nmodel = MixedInputModel(emb_sizes=emb_sizes, \r\n n_cont=len(data_full.columns)-len(categorical_features), \r\n emb_drop=0.04, \r\n out_size=1, \r\n sizes=[1000,500,250], \r\n drops=[0.001, 0.01,0.01], \r\n y_range=y_range).to(device)\r\nprint(model)\r\noptimizer = optimizerim.Adam(model.parameters(), 1e-2)\r\n#optimizer = optimizerim.SGD(model.parameters(), lr=0.01, momentum=0.9)\r\nlr_cosine = lr_scheduler.CosineAnnealingLR(optimizer, 1000)\r\n\r\nnum_epoches = 150\r\nmodel = train(model=model, train_dl=traindl, val_dl=valdl, loss_fn=F.mse_loss, optimizer=optimizer, scheduler=lr_cosine, num_epochs=num_epoches)\r\ntorch.save(model.state_dict(), '/data/evan/modelsPP/' + model_name + '.pth')\r\nmodel.load_state_dict(torch.load('/data/evan/modelsPP/' + model_name + '.pth'))\r\nmodel.eval()\r\n\r\ny_true_val = list()\r\ny_pred_val = list()\r\npredict = []\r\ntruth = []\r\ndiff = []\r\nfor cat, cont, y in testdl:\r\n\r\n cat = cat.to(device)\r\n cont = cont.to(device)\r\n y = y.to(device)\r\n pred = model(cat, cont)\r\n for i in range(len(pred.cpu().data.numpy())):\r\n p = np.exp(pred.cpu().data.numpy()[i])\r\n t = np.exp(y.cpu().data.numpy()[i])\r\n print(\"Predict: \",p, \"\\tTrue: \",t)\r\n predict.append(p)\r\n 
truth.append(t)\r\n diff.append(abs(p-t))\r\n\r\n y_true_val += list(y.cpu().data.numpy())\r\n y_pred_val += list(pred.cpu().data.numpy())\r\nval_rmse = rmse(y_true_val, y_pred_val)\r\n\r\nresults_test = pd.DataFrame(\r\n {'Prediction': predict,\r\n 'Truth': truth,\r\n 'Difference':diff\r\n })\r\n \r\n \r\nresults_test.to_csv('/home/evan/PP_ResultsTest2.csv', encoding='utf-8')\r\nprint('Test RMSE: {:.4f}'.format(val_rmse))\r\n\r\n# Close CSV File\r\ncsvFile.close()\r\n", "sub_path": "DL_PricePrediction.py", "file_name": "DL_PricePrediction.py", "file_ext": "py", "file_size_in_byte": 15554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.device", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 31, "usage_type": "attribute"}, {"api_name": "calendar.month_abbr", "line_number": 34, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.utils.data.iterrows", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 57, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.utils.data.at", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.utils.data.at", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.utils.data.at", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 78, "usage_type": "argument"}, {"api_name": "torch.utils.data.columns", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.utils.data.isnull", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.utils.data.drop", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.utils.data.drop", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.utils.data.drop", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.utils.data.isnull", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.utils.data.drop", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.utils.data.isnull", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.utils.data.dropna", 
"line_number": 113, "usage_type": "call"}, {"api_name": "torch.utils.data.columns", "line_number": 114, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 116, "usage_type": "name"}, {"api_name": "pandas.to_numeric", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.utils.data.price", "line_number": 122, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.utils.data.drop", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.utils.data.isnull", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 131, "usage_type": "argument"}, {"api_name": "torch.utils.data.columns", "line_number": 132, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.utils.data.columns", "line_number": 138, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 141, "usage_type": "name"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 142, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 149, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 153, "usage_type": "argument"}, {"api_name": "numpy.exp", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 158, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 164, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 167, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 167, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 172, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 183, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 190, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 190, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", 
"line_number": 200, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 200, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 201, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 204, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 206, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 206, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 209, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 229, "usage_type": "call"}, {"api_name": "time.time", "line_number": 235, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 302, "usage_type": "call"}, {"api_name": "time.time", "line_number": 311, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 327, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 345, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 381, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 381, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 382, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 382, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 383, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 383, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 400, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 413, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 413, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.CosineAnnealingLR", "line_number": 415, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 415, "usage_type": "name"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 418, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 418, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 419, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 420, "usage_type": "call"}, 
{"api_name": "numpy.exp", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 436, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 446, "usage_type": "call"}]} +{"seq_id": "417717659", "text": "# Tuple\n# - 튜플은 불변 리스트 이지만, 필드명이 없는 레코드로 사용할 수도 있다\n\n# 1. 레코드로서의 튜플\n# - 튜플을 필드의 집합으로 사용하는 경우에는 항목 수가 고정되어 있고 항목의 순서가 중요하다\nimport collections\n\nlax_coordinates = (33.9452, -118.12341)\ncitiy, year, pop, chg, area = ('Tokyo', 200, 32450, 0.66, 8014)\ntraveler_ids = [('USA', '31195855'), ('BRA', 'CE342567'), ('ESP', 'SDA201010')]\n\nfor passport in sorted(traveler_ids):\n print('%s/%s' % passport)\n\n# 튜플 언팩킹 -> 병렬 할당\nfor country, _ in traveler_ids:\n print(country)\n\n# 2.튜플 언팩킹\n# - 병렬 할당\nlatitude, logitude = lax_coordinates\n\n# - 튜플 언팩킹을 이용하면 임시 변수를 사용하지 않고 두 변수의 값을 서로 교환할 수 있다\na = 3\nb = 2\nb, a = a, b\n\n# - *를 붙여 튜플을 언패킹할수 있다\ndivmod(20, 8) # 이걸\nt = (20, 8)\ndivmod(*t) # 이렇게 할 수도있다\nquotient, remainder = divmod(*t) # 당연히 이렇게도 된다\n\n# - 언팩킹의 다른 사용 방법\nimport os\n\n_, filename = os.path.split('/home/hyun/.ssh/dirsa.pub') # os.path.split을 하면 파일과 폴더 부분을 잘라준다\n\n# - 튜플을 언팩킹할때 일부 항목에만 관심이 있는 경우에는 *를 사용할수 있다\na, b, *rest = range(5) # (0, 1, [2, 3, 4])\na, b, *rest = range(3) # (0, 1, [2])\na, b, *rest = range(2) # (0, 1, [])\n\n# - 병렬 할당의 경우 * 는 단 하나의 변수에만 적용할 수 있다\na, *body, c, d = range(5) # (0, [1, 2], 3, 4)\n*head, b, c, d = range(5) # ([0, 1], 2, 3, 4)\n\n# - 내포된 튜플 언패킹 (튜플 안에 튜플 언팩킹)\nmetro_areas = [\n ('A', 'a', 1.123, (2.123456, 3.123456)),\n ('B', 'b', 2.123, (3.123456, 4.123456)),\n ('C', 'c', 3.123, (4.123456, 5.123456)),\n ('D', 'd', 4.123, (5.123456, 6.123456)),\n ('E', 'e', 5.123, (6.123456, 7.123456)),\n]\n\nprint('{:15} | {:^9} | {:^9}'.format('', 'lat.', 'long.'))\nfmt = '{:15} | {:9.4f} | {:9.4f}'\nfor name, cc, pop, (latitude, logitude) in metro_areas:\n if logitude >= 3:\n print(fmt.format(name, latitude, logitude))\n\n# - 명명된 튜플\n# - collections.namedtuple() 함수는 필드명과 클래스명을 추가한 튜플의 서브클래스를 생성하는 팩토리 함수로서, 디벙깅에 유용하다\nfrom collections import namedtuple\n# - namedtuple은 객체를 반환한다\n# - 만드는 방법 namedtuple('객체이름','필드명1 필드명2')\nCity = namedtuple('City', 'name country population coordinates')\n# City = namedtuple('City', ['name', 'country', 'population', 'coordinates']) # 위와 상동\ntokyo = City('Tokyo', 'JP', 36.933, (35.68922, 139.123123))\nprint(tokyo)\n\nCard = namedtuple('Card', ['rank', 'suit'])\nCard2 = namedtuple('Card', 'rank suit')\na = Card('high', \"d\")\nb = Card2('high', \"d\")\nprint(a)\nprint(b)\n\n# - 불변 리스트로서의 튜플\n# - 튜플은 항목을 추가하거나 삭제하는 기능 및 __reversed__를 제외하고 리스트가 제공하는 메서드를 모두 지원한다\n\n", "sub_path": "02_Struct/04_tuple.py", "file_name": "04_tuple.py", "file_ext": "py", "file_size_in_byte": 3113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.split", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 73, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "209396560", "text": "from tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Input, concatenate, Concatenate, Conv1D, Flatten, MaxPool1D, Dropout, GlobalAveragePooling1D\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.utils import 
to_categorical\nfrom sklearn.datasets import load_breast_cancer\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, RobustScaler, StandardScaler, QuantileTransformer, PowerTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tensorflow.keras.optimizers import Adam, Adagrad, Adamax, Adadelta\nfrom tensorflow.keras.optimizers import RMSprop, SGD, Nadam\nimport time\n\ndatasets = load_breast_cancer()\n\n# print(datasets.DESCR)\n# print(datasets.feature_names)\n\n\n#1. 데이터\nx = datasets.data\ny = datasets.target\n\nprint(x.shape, y.shape) #(569, 30) (569,)\n\n# print(y[:20]) # y가 0과 1인, 2진 분류\n# print(np.unique(y))\n\n\n# 데이터 전처리\nx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, shuffle=True, random_state=66)\n\nscaler = StandardScaler()\nscaler.fit(x_train) # 훈련\nx_train = scaler.transform(x_train) # 변환\nx_test = scaler.transform(x_test)\n\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)\nx_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)\n\nprint(x_train.shape) #(398, 30, 1)\nprint(x_test.shape) #(171, 30, 1)\n\n\n#2. 모델 구성\nmodel = Sequential()\nmodel.add(Conv1D(filters=8, kernel_size=2, padding='same', input_shape=(30,1)))\nmodel.add(Dropout(0.2))\nmodel.add(Conv1D(8, 2, padding='same', activation='relu')) \nmodel.add(MaxPool1D())\n\nmodel.add(Conv1D(32, 2, padding='same', activation='relu')) \nmodel.add(Dropout(0.2))\nmodel.add(Conv1D(32, 2, padding='same', activation='relu')) \nmodel.add(MaxPool1D())\n\nmodel.add(Conv1D(128, 2, padding='same', activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Conv1D(128, 2, padding='same', activation='relu')) \nmodel.add(MaxPool1D())\n\n# model.add(Flatten()) \n# model.add(Dense(128, activation='relu')) \n# model.add(Dropout(0.2))\n# model.add(Dense(128, activation='relu')) \n# model.add(Dropout(0.2))\n# model.add(Dense(128, activation='relu')) \nmodel.add(GlobalAveragePooling1D())\nmodel.add(Dense(1, activation=\"sigmoid\"))\n\n#3. 컴파일, 훈련\noptimizer = Adam(lr=0.001)\n\nmodel.compile(loss='mse', optimizer=optimizer, metrics=['acc'])\n\nes = EarlyStopping(monitor='val_loss', patience=15, mode='min', verbose=1)\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=5, mode='auto', verbose=1, factor=0.5)\n\nstart_time = time.time()\nhist = model.fit(x_train, y_train, epochs=300, batch_size=32, validation_split=0.25, callbacks=[es, reduce_lr])\nend_time = time.time() - start_time\n\n\n#4. 
평가, 예측\ny_predict = model.predict([x_test])\n\nloss = model.evaluate(x_test, y_test)\nprint(\"time = \", end_time)\nprint('loss : ', loss)\nr2 = r2_score(y_test, y_predict)\nprint('R^2 score : ', r2)\n\n\n\n'''\ndnn\n# loss: 2.6394e-09 - accuracy: 1.0000\n\ncnn\ntime = 46.88242268562317\nloss : 0.02056293934583664\nR^2 score : 0.9103903276773451\n\nreduce_lr\ntime = 25.694993019104004\nloss : [0.04111980274319649, 0.9415204524993896]\nR^2 score : 0.820807128061524\n'''\n", "sub_path": "keras2/keras63_ReduceLR_4_cancer.py", "file_name": "keras63_ReduceLR_4_cancer.py", "file_ext": "py", "file_size_in_byte": 3334, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sklearn.datasets.load_breast_cancer", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPool1D", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPool1D", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPool1D", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.GlobalAveragePooling1D", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.EarlyStopping", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ReduceLROnPlateau", "line_number": 79, "usage_type": "call"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "185146494", "text": "import sys, os\nfrom collections import OrderedDict\nimport pprint\nimport inspect\nimport re\n\nclass Attribute:\n def __init__(self, attrviewer=False, itercount=None, iterdepth=None):\n self.attrviewer = attrviewer\n self.itercount = itercount\n self.iterdepth = iterdepth\n\n def rc_get(self, obj, memory='', depth=0):\n if not self.iterdepth:\n if hasattr(obj, '__dict__') and len(obj.__dict__) != 0:\n self._rc_get_obj(obj, memory, depth)\n elif isinstance(obj, list):\n self._rc_get_list(obj, memory, depth)\n elif isinstance(obj, dict):\n self._rc_get_dict(obj, memory, depth)\n else:\n if depth < self.iterdepth:\n 
if hasattr(obj, '__dict__') and len(obj.__dict__) != 0:\n self._rc_get_obj(obj, memory, depth)\n elif isinstance(obj, list):\n self._rc_get_list(obj, memory, depth)\n elif isinstance(obj, dict):\n self._rc_get_dict(obj, memory, depth)\n\n elif depth >= self.iterdepth:\n return None\n\n\n def _rc_get_obj(self, obj, memory='', depth=0):\n if hasattr(obj, '__dict__') and len(obj.__dict__) != 0:\n ic = -1\n for key, attr in vars(obj).items():\n if not self.itercount:\n if self.attrviewer:\n if not len(memory):\n print(f'.{key} : {attr}')\n else:\n print(f'{memory}.{key} : {attr}')\n else:\n if not len(memory):\n print(f'.{key}')\n else:\n print(f'{memory}.{key}')\n else:\n ic += 1\n if ic == self.itercount:\n break\n\n if self.attrviewer:\n if not len(memory):\n print(f'.{key} : {attr}')\n else:\n print(f'{memory}.{key} : {attr}')\n else:\n if not len(memory):\n print(f'.{key}')\n else:\n print(f'{memory}.{key}')\n\n self.spliter(n=depth)\n for key, attr in vars(obj).items():\n self.rc_get(attr, memory=memory+'.'+str(key), depth=depth+1)\n \n else:\n return None\n\n\n\n def _rc_get_list(self, obj, memory='', depth=0):\n if isinstance(obj, list):\n ic = -1\n for key, attr in enumerate(obj):\n if not self.itercount:\n if self.attrviewer:\n if not len(memory):\n print(f'[{key}] : {attr}')\n else:\n print(f'{memory}[{key}] : {attr}')\n else:\n if not len(memory):\n print(f'[{key}]')\n else:\n print(f'{memory}[{key}]')\n\n else:\n ic += 1\n if ic == self.itercount:\n break\n\n if self.attrviewer:\n if not len(memory):\n print(f'[{key}] : {attr}')\n else:\n print(f'{memory}[{key}] : {attr}')\n else:\n if not len(memory):\n print(f'[{key}]')\n else:\n print(f'{memory}[{key}]')\n\n self.spliter(n=depth)\n for key, attr in enumerate(obj):\n self.rc_get(attr, memory=memory+'['+str(key)+']', depth=depth+1)\n \n else:\n return None\n\n\n\n def _rc_get_dict(self, obj, memory='', depth=0):\n if isinstance(obj, (dict, OrderedDict)):\n ic = -1\n for key, attr in obj.items():\n if not self.itercount:\n if self.attrviewer:\n if not len(memory):\n if isinstance(key, str):\n print(f'[\"{key}\"] : {attr}')\n else:\n print(f'[{key}] : {attr}')\n else:\n if isinstance(key, str):\n print(f'{memory}[\"{key}\"] : {attr}')\n else: \n print(f'{memory}[{key}] : {attr}')\n else:\n if not len(memory):\n if isinstance(key, str):\n print(f'[\"{key}\"]')\n else:\n print(f'[{key}]')\n else:\n if isinstance(key, str):\n print(f'{memory}[\"{key}\"]')\n else: \n print(f'{memory}[{key}]')\n \n else:\n ic += 1\n if ic == self.itercount:\n break\n \n if self.attrviewer:\n if not len(memory):\n if isinstance(key, str):\n print(f'[\"{key}\"] : {attr}')\n else:\n print(f'[{key}] : {attr}')\n else:\n if isinstance(key, str):\n print(f'{memory}[\"{key}\"] : {attr}')\n else: \n print(f'{memory}[{key}] : {attr}')\n else:\n if not len(memory):\n if isinstance(key, str):\n print(f'[\"{key}\"]')\n else:\n print(f'[{key}]')\n else:\n if isinstance(key, str):\n print(f'{memory}[\"{key}\"]')\n else: \n print(f'{memory}[{key}]')\n\n\n self.spliter(n=depth)\n for key, attr in obj.items():\n if isinstance(key, str):\n self.rc_get(attr, memory=memory+'[\"'+str(key)+'\"]', depth=depth+1)\n else:\n self.rc_get(attr, memory=memory+'['+str(key)+']', depth=depth+1)\n else:\n return None\n\n @staticmethod\n def spliter(n, turn=False):\n split_line = '-'*(100 - 5*n)\n if turn==True:\n print(split_line)\n\n\n\nclass logtrace:\n def __init__(self, func):\n self.func = func\n \n def __call__(self, *args, **kwargs):\n if not os.path.isdir('.DebuggingLog'):\n 
os.mkdir('.DebuggingLog')\n num = 0\n elif not os.listdir('.DebuggingLog/'):\n num = 0\n else:\n loglist = os.listdir('.DebuggingLog/')\n \n lognumbers = []\n for log in loglist:\n if re.search(r'debugging\\.log', log):\n lognumbers.append(int(log[13:]))\n if len(lognumbers) == 0:\n num = 0\n else:\n num = max(lognumbers) + 1\n\n stdout_restore = sys.stdout # Save the current stdout so that we can revert sys.stdout after we complete\n sys.stdout = open(f'.DebuggingLog/debugging.log{num}', 'w') # Redirect sys.stdout to the file\n \n attrviewer = kwargs['attrviewer']\n itercount = kwargs['itercount']\n iterdepth = kwargs['iterdepth']\n logs = kwargs['logs'] # logs : self.logs in Debugger\n lognames = kwargs['lognames'] # lognames : self.namespace in Debugger\n \n print('* FILE NAME :', sys.argv[0])\n print('* BREAK POINT', set(logs.keys()))\n for key in logs:\n obj = logs[key]\n print(f' * {key} : 0~{len(obj)-1}')\n \n print('\\n* [1]-----------------------------------------------DETAILS INFO(attributes)-------------------------------------------*')\n attribute = Attribute(attrviewer=attrviewer, itercount=itercount, iterdepth=iterdepth)\n\n for key, values in logs.items():\n for i, obj in enumerate(values):\n print(f'\\n[{key}][{i}] - {i}th object')\n print(f'===========================')\n print(attribute.rc_get(obj))\n\n print('\\n* [2]-----------------------------------------------DETAILS INFO(method)-------------------------------------------*')\n\n for key, values in logs.items():\n for i, obj in enumerate(values):\n print(f'\\n[{key}][{i}] - {i}th object')\n print(f'===========================')\n for idx, method in enumerate(inspect.getmembers(obj, inspect.ismethod)):\n print(f'\\n[{key}][{i}][{idx}] : obj.{method[0]}(*args, **kwargs)')\n print(method)\n print(inspect.getsource(getattr(obj, f'{method[0]}')))\n\n \n\n\n print('\\n* [3]-----------------------------------------------------FINAL LOG---------------------------------------------------*')\n \n for key, values in logs.items():\n for i, obj in enumerate(values):\n print(f'\\n[{key}][{i}] - {i}th object')\n print(f'===========================')\n pprint.pprint(obj)\n\n sys.stdout.close() # Close the file\n sys.stdout = stdout_restore # Restore sys.stdout to our old saved file handler \n print(f'.DebuggingLog/debugging.log{num} file was successfully created!')\n return self.func(*args, **kwargs)\n\n\n\nclass Debugger:\n def __init__(self, attrviewer=False, itercount=None, iterdepth=None):\n self.attrviewer = attrviewer\n self.itercount = itercount\n self.iterdepth = iterdepth\n self.attribute = list()\n self.forlooplog = list()\n self.namespace = list()\n self.logs = OrderedDict()\n self.callcount = -1\n\n def __call__(self, *obj, **kwargs):\n self.callcount += 1\n self.logs[kwargs['logname']] = obj\n\n self.forlooplog.append(obj)\n if 'logname' in kwargs:\n self.namespace.append(f'[debug{self.callcount}] '+kwargs['logname'])\n else:\n self.namespace.append(f'[debug{self.callcount}] Untitled')\n\n def __del__(self):\n for name in set(self.namespace):\n if name[-8:] != 'Untitled':\n pass\n self.logwriter(self.forlooplog,\n lognames=self.namespace,\n logs=self.logs,\n attrviewer=self.attrviewer,\n itercount=self.itercount,\n iterdepth=self.iterdepth)\n\n @logtrace\n def logwriter(self, *args, **kwargs):\n pass\n\n\n\n\ndef main():\n dic = {1:1, 2:2, 3:3, 4:[1,2,3,4,5], 5:{'a':1, 'b':2, 'c':{3:3, 'a':1, 'b':2, 'ed':23}}}\n\n debugger = Debugger(attrviewer=True, itercount=None, iterdepth=2)\n debugger(dic, logname='here')\n del 
debugger\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "tutorials/debugging.py", "file_name": "debugging.py", "file_ext": "py", "file_size_in_byte": 11527, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.OrderedDict", "line_number": 116, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 195, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 197, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 200, "usage_type": "call"}, {"api_name": "re.search", "line_number": 204, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 211, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 212, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 220, "usage_type": "attribute"}, {"api_name": "inspect.getmembers", "line_number": 241, "usage_type": "call"}, {"api_name": "inspect.ismethod", "line_number": 241, "usage_type": "attribute"}, {"api_name": "inspect.getsource", "line_number": 244, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 255, "usage_type": "call"}, {"api_name": "sys.stdout.close", "line_number": 257, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 257, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 258, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 272, "usage_type": "call"}]} +{"seq_id": "549971650", "text": " #!usr/bin/python\n\nfrom Bio import SeqIO\nimport sqlite3\nfrom collections import Counter\nimport sys\n\ndef set_org_name_dict(orthomcl_codenames):\n 'dict of orthomcl taxa_ids and original organism names'\n orgs=open(orthomcl_codenames).readlines()\n org_dict={}\n for o in orgs:\n vec=o.split(\"\\t\")\n organism=vec[0].split(\"/\")[-1].replace(\"fasta\",\"\").replace(\"faa\",\"\")\n code=vec[1].replace(\"\\n\",\"\")\n org_dict[code]=organism\n return org_dict\n\nclass Protein():\n ortho_name_dict=set_org_name_dict( \"/nobackup1/jbrown/annotation/orthomcl/host_v_host/host_taxon_codes.txt\")\n \n def __init__(self, name):\n self.name=name\n \n def faa_name(self):\n return self.name.split(\"|\")[1]\n \n def locus_tag(self):\n contig=self.faa_name().split(\"_\")[2]\n number=self.faa_name().split(\"_\")[3]\n return \"ORF_%s_%s\" % (contig, number)\n \n def gbk_file(self):\n code=self.name.split(\"|\")[0]\n long_name=name_dict[code].replace(\"_cds_prod.\",\"_contigs*_prod.gbk\")\n path=\"/nobackup1/jbrown/vibrio_genomes/gbk/\"\n return path+long_name\n \n def product(self):\n conn=sqlite3.connect('/pool001/jbrown/HRX_Vibrio_lt_db.sqlite')\n c=conn.cursor()\n c.execute(\"SELECT product from products where locus_tag='%s'\" % self.faa_name())\n output=c.fetchone()\n if output==None:\n result=\"not in db\"\n else:\n result=output[0]\n conn.close()\n return result\n \ndef create_group_dict(group_file):\n group_dict={}\n with open(group_file) as infile:\n for line in infile:\n cluster=line.split(\":\")[0]\n proteins=line.split(\":\")[1].replace(\"\\n\",\"\").split(\" \")[1:]\n group_dict[cluster]=proteins\n return group_dict\n\ndef get_all_group_annotations(group, group_dict):\n proteins=group_dict[group]\n products=[]\n for p in proteins:\n pobj=Protein(p)\n products.append(pobj.product())\n return products\n\ndef get_most_common_group_annotation(group, group_dict):\n 
proteins=group_dict[group]\n products=[]\n for p in proteins:\n pobj=Protein(p)\n products.append(pobj.product())\n prod_count=Counter(products)\n mc=prod_count.most_common(1)\n return list(mc)[0][0]\n\ndef write_best_hits(ortho_group_file, cluster_list, output_file):\n clusters=open(cluster_list).readlines()\n group_dict=create_group_dict(ortho_group_file)\n out=open(output_file,\"w\")\n out.write(\"cluster\\tmost_common_annotation\\tgroup_size\\n\")\n for c in clusters:\n clust=c.replace(\"\\n\",\"\")\n best=get_most_common_group_annotation(clust, group_dict)\n num_prots=len(group_dict[clust])\n out.write(\"%s\\t%s\\t%s\\n\" % (clust, best, num_prots))\n out.close()\n\ncluster_list=\"/nobackup1/jbrown/annotation/orthomcl/host_v_host/hi_mi_hostprot_phage_pairs.txt\"\noutput=\"/nobackup1/jbrown/annotation/orthomcl/host_v_host/himi_cluster_annotations_from_gbks.txt\"\northo_group_file=\"/nobackup1/jbrown/annotation/orthomcl/host_v_host/groups.txt\"\n\nwrite_best_hits(ortho_group_file, cluster_list, output)\n \n", "sub_path": "scripts/get_ortho_group_products.py", "file_name": "get_ortho_group_products.py", "file_ext": "py", "file_size_in_byte": 3135, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sqlite3.connect", "line_number": 40, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "256807560", "text": "import os, sys\nfrom pathlib import Path\n\nfrom torch import autograd, optim\n\nfrom npbg.pipelines import Pipeline\nfrom npbg.datasets.dynamic import get_datasets\nfrom npbg.models.texture import PointTexture, MeshTexture\nfrom npbg.models.unet import UNet\nfrom npbg.models.compose import NetAndTexture, MultiscaleNet, RGBTexture\nfrom npbg.criterions.vgg_loss import VGGLoss\nfrom npbg.utils.train import to_device, set_requires_grad, save_model, unwrap_model, image_grid, to_numpy, load_model_checkpoint, freeze\nfrom npbg.utils.perform import TicToc, AccumDict, Tee\n\n\nTextureOptimizerClass = optim.RMSprop\n\n\ndef get_net(input_channels, args):\n net = UNet(\n num_input_channels=input_channels, \n num_output_channels=3, \n feature_scale=args.net_size, \n more_layers=0, \n upsample_mode='bilinear', \n norm_layer='bn', \n last_act='', \n conv_block=args.conv_block\n )\n\n return net\n\n\ndef get_texture(num_channels, size, args):\n if not hasattr(args, 'reg_weight'):\n args.reg_weight = 0.\n\n if args.use_mesh:\n texture = MeshTexture(num_channels, size, activation=args.texture_activation, reg_weight=args.reg_weight)\n else:\n texture = PointTexture(num_channels, size, activation=args.texture_activation, reg_weight=args.reg_weight)\n\n if args.texture_ckpt:\n texture = load_model_checkpoint(args.texture_ckpt, texture)\n\n return texture\n\n\ndef backward_compat(args):\n if not hasattr(args, 'input_channels'):\n args.input_channels = None\n if not hasattr(args, 'conv_block'):\n args.conv_block = 'gated'\n\n if args.pipeline == 'npbg.pipelines.ogl.Pix2PixPipeline':\n if not hasattr(args, 'input_modality'):\n args.input_modality = 1\n\n return args\n\n\nclass TexturePipeline(Pipeline):\n def export_args(self, parser):\n parser.add_argument('--descriptor_size', type=int, default=8)\n parser.add_argument('--texture_size', type=int)\n parser.add_argument('--texture_ckpt', type=Path)\n parser.add('--texture_lr', type=float, default=1e-1)\n parser.add('--texture_activation', type=str, default='none')\n parser.add('--n_points', type=int, default=0, help='this is 
for inference')\n\n def create(self, args):\n args = backward_compat(args)\n\n if not args.input_channels:\n args.input_channels = [args.descriptor_size] * args.num_mipmap\n\n net = get_net(args.input_channels, args)\n\n textures = {}\n\n if args.inference:\n if args.use_mesh:\n size = args.texture_size\n else:\n size = args.n_points\n textures = {\n 0: get_texture(args.descriptor_size, size, args)\n }\n else:\n self.ds_train, self.ds_val = get_datasets(args)\n\n for ds in self.ds_train:\n if args.use_mesh:\n assert args.texture_size, 'set texture size'\n size = args.texture_size\n else:\n assert ds.scene_data['pointcloud'] is not None, 'set pointcloud'\n size = ds.scene_data['pointcloud']['xyz'].shape[0]\n textures[ds.id] = get_texture(args.descriptor_size, size, args)\n\n self.optimizer = optim.Adam(net.parameters(), lr=args.lr)\n\n if len(textures) == 1:\n self._extra_optimizer = TextureOptimizerClass(textures[0].parameters(), lr=args.texture_lr)\n else:\n self._extra_optimizer = None\n\n self.criterion = args.criterion_module(**args.criterion_args).cuda()\n\n ss = args.supersampling if hasattr(args, 'supersampling') else 1\n\n self.net = net\n self.textures = textures\n self.model = NetAndTexture(net, textures, ss)\n\n self.args = args\n\n def state_objects(self):\n datasets = self.ds_train\n\n objs = {'net': self.net}\n objs.update({ds.name: self.textures[ds.id] for ds in datasets})\n\n return objs\n\n def dataset_load(self, dataset):\n self.model.load_textures([ds.id for ds in dataset])\n \n for ds in dataset:\n ds.load()\n\n\n def extra_optimizer(self, dataset):\n # if we have single dataset, don't recreate optimizer\n if self._extra_optimizer is not None:\n lr_drop = self.optimizer.param_groups[0]['lr'] / self.args.lr\n self._extra_optimizer.param_groups[0]['lr'] = self.args.texture_lr * lr_drop\n return self._extra_optimizer\n\n param_group = []\n for ds in dataset:\n param_group.append(\n {'params': self.textures[ds.id].parameters()}\n )\n\n lr_drop = self.optimizer.param_groups[0]['lr'] / self.args.lr\n\n return TextureOptimizerClass(param_group, lr=self.args.texture_lr * lr_drop)\n\n def dataset_unload(self, dataset):\n self.model.unload_textures()\n\n for ds in dataset:\n ds.unload()\n self.textures[ds.id].null_grad()\n\n def get_net(self):\n return self.net\n\n\nclass Pix2PixPipeline(Pipeline):\n def export_args(self, parser):\n parser.add('--input_modality', type=int, default=1)\n\n def create(self, args):\n args = backward_compat(args)\n\n if not args.input_channels:\n print('Assume input channels is 3')\n args.input_channels = [3] * args.num_mipmap\n\n net = get_net(args.input_channels, args)\n\n self.model = MultiscaleNet(net, args.input_modality)\n self.net = net\n\n if not args.inference:\n self.ds_train, self.ds_val = get_datasets(args)\n\n \n self.optimizer= optim.Adam(self.model.parameters(), lr=args.lr)\n \n\n self.criterion = args.criterion_module(**args.criterion_args).cuda()\n\n def state_objects(self):\n return {'net': self.net}\n\n def dataset_load(self, dataset):\n for ds in dataset:\n ds.load()\n\n\n def dataset_unload(self, dataset):\n for ds in dataset:\n ds.unload()\n \n\n def get_net(self):\n return self.net\n\n\nclass RGBTexturePipeline(Pipeline):\n def export_args(self, parser):\n parser.add('--texture_size', type=int, default=2048)\n parser.add('--texture_lr', type=float, default=1e-2)\n\n def create(self, args):\n self.texture = MeshTexture(3, args.texture_size, activation='none', levels=1, reg_weight=0)\n self.model = RGBTexture(self.texture)\n\n if 
not args.inference:\n self.ds_train, self.ds_val = get_datasets(args)\n\n self.optimizer = TextureOptimizerClass(self.texture.parameters(), lr=args.texture_lr)\n\n self.criterion = args.criterion_module(**args.criterion_args).cuda()\n\n def dataset_load(self, dataset):\n for ds in dataset:\n ds.load()\n\n def dataset_unload(self, dataset):\n for ds in dataset:\n ds.unload()\n\n def state_objects(self):\n return {'model': self.model}\n\n def get_net(self):\n return self.model\n", "sub_path": "npbg/pipelines/ogl.py", "file_name": "ogl.py", "file_ext": "py", "file_size_in_byte": 7071, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.optim.RMSprop", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 16, "usage_type": "name"}, {"api_name": "npbg.models.unet.UNet", "line_number": 20, "usage_type": "call"}, {"api_name": "npbg.models.texture.MeshTexture", "line_number": 39, "usage_type": "call"}, {"api_name": "npbg.models.texture.PointTexture", "line_number": 41, "usage_type": "call"}, {"api_name": "npbg.utils.train.load_model_checkpoint", "line_number": 44, "usage_type": "call"}, {"api_name": "npbg.pipelines.Pipeline", "line_number": 62, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 66, "usage_type": "name"}, {"api_name": "npbg.datasets.dynamic.get_datasets", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 101, "usage_type": "name"}, {"api_name": "npbg.models.compose.NetAndTexture", "line_number": 114, "usage_type": "call"}, {"api_name": "npbg.pipelines.Pipeline", "line_number": 161, "usage_type": "name"}, {"api_name": "npbg.models.compose.MultiscaleNet", "line_number": 174, "usage_type": "call"}, {"api_name": "npbg.datasets.dynamic.get_datasets", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 181, "usage_type": "name"}, {"api_name": "npbg.pipelines.Pipeline", "line_number": 203, "usage_type": "name"}, {"api_name": "npbg.models.texture.MeshTexture", "line_number": 209, "usage_type": "call"}, {"api_name": "npbg.models.compose.RGBTexture", "line_number": 210, "usage_type": "call"}, {"api_name": "npbg.datasets.dynamic.get_datasets", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "276040371", "text": "from collections import Counter\nfrom typing import Dict, List, Tuple\n\nimport torch\n\nfrom utils.common import UNK, SOS, EOS, PAD\n\n\ndef get_dict_of_subtokens(\n token_to_id: Dict, n_most_common: int = -1, delimiter: str = '|', required_tokens: List = None\n) -> Dict:\n \"\"\"Create a new dict for converting subtokens to id\n Information about subtokens gets from token_to_id dict\n\n :param token_to_id: an original dict\n :param n_most_common: if passed, use only most common subtokens\n :param delimiter: used in tokens to divide into subtokens\n :param required_tokens: which tokens have been in a new dict, if None, than use [UNK, SOS, EOS, PAD]\n :return: the new dict of subtokens\n \"\"\"\n if required_tokens is None:\n required_tokens = [UNK, SOS, EOS, PAD]\n subtoken_counter = Counter()\n for token, i in token_to_id.items():\n subtoken_counter.update(token.split(delimiter))\n for token in required_tokens:\n if token in subtoken_counter:\n del subtoken_counter[token]\n subtoken_to_id = {}\n subtoken_to_id.update(\n [(token, 
num) for num, token in enumerate(required_tokens)]\n )\n if n_most_common == -1:\n n_most_common = len(subtoken_counter)\n subtoken_to_id.update(\n [(label[0], num + len(required_tokens))\n for num, label in enumerate(subtoken_counter.most_common(n_most_common))]\n )\n return subtoken_to_id\n\n\ndef get_token_to_subtoken_dict(\n tokens: List[str], subtoken_to_id: Dict, delimiter: str = '|'\n) -> Dict:\n \"\"\"Create a dict for converting token to corresponding tensor with subtoken's ids\n\n :param tokens: list of tokens\n :param subtoken_to_id: dict for converting subtoken to its id\n :param delimiter: used in tokens to divide into subtokens\n :return: new dict\n \"\"\"\n token_to_subtoken = {}\n unk_index = subtoken_to_id[UNK]\n for token in tokens:\n cur_split = torch.tensor([subtoken_to_id.get(tok, unk_index) for tok in token.split(delimiter)])\n token_to_subtoken[token] = cur_split\n return token_to_subtoken\n\n\ndef convert_label_to_sublabels(\n labels: List[str], sublabel_to_id: Dict, delimiter: str = '|'\n) -> torch.Tensor:\n \"\"\"Convert batch of labels to torch tensor with ids of corresponding sublabels\n SOS token is added at the beginning and EOS token is added at the ending for each label\n PAD token is used for fill empty slots in tensor\n\n :param labels: list of labels (shape: [batch_size])\n :param sublabel_to_id: dict for converting sublabels to ids\n :param delimiter: used in tokens to divide into subtokens\n :return: tensor with information about sublabels (shape: [max_length_of_sublabels + 2, batch_size])\n \"\"\"\n label_to_sublabel = get_token_to_subtoken_dict(labels, sublabel_to_id, delimiter)\n\n sublabels_length = torch.tensor([label_to_sublabel[label].shape[0] for label in labels])\n max_sublabel_length = sublabels_length.max()\n torch_labels = torch.full((max_sublabel_length.item() + 2, len(labels)), sublabel_to_id[PAD],\n dtype=torch.long)\n torch_labels[0, :] = sublabel_to_id[SOS]\n torch_labels[sublabels_length + 1, torch.arange(0, len(labels))] = sublabel_to_id[EOS]\n for sample, label in enumerate(labels):\n torch_labels[1:sublabels_length[sample] + 1, sample] = label_to_sublabel[label]\n return torch_labels\n\n\ndef get_token_id_to_subtoken_dict(\n token_ids: List[int], id_to_token: Dict, subtoken_to_id: Dict, delimiter: str = '|'\n) -> Dict:\n \"\"\"Create a dict for converting token's id to tensor of corresponding subtoken's ids\n\n :param token_ids: list of token's ids\n :param id_to_token: dict for converting token's id to token\n :param subtoken_to_id: dict for converting subtoken to id\n :param delimiter: used in tokens to divide into subtokens\n :return: new dict\n \"\"\"\n tokens = [id_to_token[token_id] for token_id in token_ids]\n token_to_subtoken = get_token_to_subtoken_dict(tokens, subtoken_to_id, delimiter)\n token_id_to_subtoken = {\n token_id: token_to_subtoken[id_to_token[token_id]] for token_id in token_ids\n }\n return token_id_to_subtoken\n", "sub_path": "utils/token_processing.py", "file_name": "token_processing.py", "file_ext": "py", "file_size_in_byte": 4181, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "typing.Dict", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "utils.common.UNK", "line_number": 22, "usage_type": "name"}, {"api_name": "utils.common.SOS", "line_number": 22, "usage_type": "name"}, {"api_name": "utils.common.EOS", "line_number": 22, "usage_type": "name"}, 
{"api_name": "utils.common.PAD", "line_number": 22, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.common.UNK", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 55, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.full", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.common.PAD", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.long", "line_number": 77, "usage_type": "attribute"}, {"api_name": "utils.common.SOS", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.common.EOS", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 62, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "608492087", "text": "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport json\n\n\ndef get_db(server_ip,url,params=''):\n url = \"http://\" + server_ip + \":9001/\" + url+params\n body = {\"dbName\": \"admin\"}\n headers = {'content-type': \"application/json\", 'Authorization': 'APP appid = 4abf1a,token = 9480295ab2e2eddb8'}\n response = requests.get(url, json=body, headers=headers)\n # 也可以直接将data字段换成json字段,2.4.3版本之后支持\n # response = requests.post(url, json = body, headers = headers)\n res = json.loads(response.text)\n # 返回信息\n print(res)\n return res\ndef upload_pic(server_ip,url, dbName,name,paths):\n url=\"http://\"+server_ip+\":9001/\"+url\n body = {\"dbName\":dbName,\"getFeatrue\":0,\"qualityThreshold\":0.9}\n imgs = list()\n for path in paths:\n with open(path, 'rb') as f:\n imgs.append(('imageDatas', (name, f.read())))\n\n response = requests.post(url, body,files=imgs)\n # 也可以直接将data字段换成json字段,2.4.3版本之后支持\n # response = requests.post(url, json = body, headers = headers)\n res=json.loads(response.text)\n # 返回信息\n return res\n # print(response.status_code)\nif __name__ == '__main__':\n server_ip = \"192.168.55.120\"\n\n # get_db(server_ip,url=\"verify/target/gets\")\n\n name=\"lbg\"\n paths = [\"d:/lbg.jpg\", \"d:/lbg2.jpg\"]\n # paths = [\"d:/jiang.png\"]\n res=upload_pic(server_ip,url=\"verify/face/synAdd\",dbName=\"sbd_test01\",name=name,paths=paths)\n if res['result'] == \"error\":\n if \"FAILED_TO_CREATE_DB_EXISTS\" == res['errorMessage']:\n print(1)\n print(\"error\", res['errorMessage'])\n elif res['result'] == 'success':\n for person in res['success']:\n print(person['personId'],person['qualityScore'],person['name'])\n #\n # res = create_db(server_ip, url=\"verify/target/deletes\", params=\"?dbName=aaa4\")\n # if res['result'] == \"error\":\n # if \"FAILED_TO_CREATE_DB_EXISTS\" == res['errorMessage']:\n # print(1)\n # print(\"error\", res['errorMessage'])\n # elif res['result'] == 'success':\n # print(\"ok\", res['data'])", "sub_path": 
"shangtang/pics.py", "file_name": "pics.py", "file_ext": "py", "file_size_in_byte": 2146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "321705572", "text": "\"\"\"\nThis file demonstrates two different styles of tests (one doctest and one\nunittest). These will both pass when you run \"manage.py test\".\n\nReplace these with more appropriate tests for your application.\n\"\"\"\n\nfrom django.test import TestCase\nfrom django.http import HttpRequest\nimport HvZ.views\n\nclass ConfirmPagesRender(TestCase):\n anonymous = HvZ.views.anonymous_info()\n me = HvZ.views.User.objects.get(email=\"jthemphill@gmail.com\")\n\n def req(self, user):\n \"\"\"Returns an HttpRequest corresponding to the given user\"\"\"\n req = HttpRequest()\n req.user = user\n return req\n\n def render_views(self):\n req = self.req(self.me)\n \n for f in [player_user_search]:\n f(req)\n\nclass SimpleTest(TestCase):\n def test_basic_addition(self):\n \"\"\"\n Tests that 1 + 1 always equals 2.\n \"\"\"\n self.failUnlessEqual(1 + 1, 2)\n\n__test__ = {\"doctest\": \"\"\"\nAnother way to test that 1 + 1 is equal to 2.\n\n>>> 1 + 1 == 2\nTrue\n\"\"\"}\n\n", "sub_path": "DjangoProject/HvZ/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "HvZ.views.views.anonymous_info", "line_number": 13, "usage_type": "call"}, {"api_name": "HvZ.views.views", "line_number": 13, "usage_type": "attribute"}, {"api_name": "HvZ.views", "line_number": 13, "usage_type": "name"}, {"api_name": "HvZ.views.views.User.objects.get", "line_number": 14, "usage_type": "call"}, {"api_name": "HvZ.views.views", "line_number": 14, "usage_type": "attribute"}, {"api_name": "HvZ.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.http.HttpRequest", "line_number": 18, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "70364188", "text": "import datetime\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom polls.models import Choice, Question\n\n\nclass QuestionModelTest(TestCase):\n def test_creating_a_new_poll_and_saving_it_to_the_database(self):\n # start by creating a new Poll object with its \"question\" and\n # \"pub_date\" attributes set\n poll = Question()\n poll.question_text = \"What's up?\"\n poll.pub_date = timezone.now()\n\n # check we can save it to the database\n poll.save()\n\n # now check we can find it in the database again\n all_polls_in_database = Question.objects.all()\n self.assertEquals(len(all_polls_in_database), 1)\n only_poll_in_database = all_polls_in_database[0]\n self.assertEquals(only_poll_in_database, poll)\n\n # and check that it's saved its two attributes: question and pub_date\n self.assertEquals(only_poll_in_database.question_text, \"What's up?\")\n self.assertEquals(only_poll_in_database.pub_date, poll.pub_date)\n\n def test_verbose_name_for_pub_date(self):\n for field in Question._meta.fields:\n if field.name == 'pub_date':\n 
self.assertEquals(field.verbose_name, 'date published')\n\n\n def test_poll_can_tell_you_its_total_number_of_votes(self):\n p = Question(question_text='where', pub_date=timezone.now())\n p.save()\n c1 = Choice(question=p, choice_text='here', votes=0)\n c1.save()\n c2 = Choice(question=p, choice_text='there', votes=0)\n c2.save()\n\n self.assertEquals(p.total_votes(), 0)\n\n c1.votes = 1000\n c1.save()\n c2.votes = 22\n c2.save()\n self.assertEquals(p.total_votes(), 1022)\n\n def was_published_recently(self):\n p = Question(question_text='where', pub_date=timezone.now())\n p.save()\n self.assertTrue(p.was_published_recently())\n\n def was_not_published_recently(self):\n p = Question(question_text='where', pub_date=(timezone.now() - datetime.timedelta(10)))\n p.save()\n self.assertFalse(p.was_published_recently())\n\n\nclass ChoiceModelTest(TestCase):\n def test_creating_some_choices_for_a_poll(self):\n # start by creating a new Poll object\n poll = Question()\n poll.question_text = \"What's up?\"\n poll.pub_date = timezone.now()\n poll.save()\n\n # now create a Choice object\n choice = Choice()\n\n # link it with our Poll\n choice.question = poll\n\n # give it some text\n choice.choice_text = \"doin' fine...\"\n\n # and let's say it's had some votes\n choice.votes = 3\n\n # save it\n choice.save()\n\n # try retrieving it from the database, using the poll object's reverse\n # lookup\n poll_choices = poll.choice_set.all()\n self.assertEquals(poll_choices.count(), 1)\n\n # finally, check its attributes have been saved\n choice_from_db = poll_choices[0]\n self.assertEquals(choice_from_db, choice)\n self.assertEquals(choice_from_db.choice_text, \"doin' fine...\")\n self.assertEquals(choice_from_db.votes, 3)\n\n\n def test_choice_defaults(self):\n choice = Choice()\n self.assertEquals(choice.votes, 0)\n\n\n def test_choice_can_calculate_its_own_percentage_of_votes(self):\n poll = Question(question_text='who?', pub_date=timezone.now())\n poll.save()\n choice1 = Choice(question=poll, choice_text='me', votes=2)\n choice1.save()\n choice2 = Choice(question=poll, choice_text='you', votes=1)\n choice2.save()\n\n self.assertEquals(choice1.percentage(), 100 * 2 / 3.0)\n self.assertEquals(choice2.percentage(), 100 * 1 / 3.0)\n\n # also check 0-votes case\n choice1.votes = 0\n choice1.save()\n choice2.votes = 0\n choice2.save()\n self.assertEquals(choice1.percentage(), 0)\n self.assertEquals(choice2.percentage(), 0)\n", "sub_path": "polls/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 3953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "polls.models.Question", "line_number": 11, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 13, "usage_type": "name"}, {"api_name": "polls.models.Question.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "polls.models.Question.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "polls.models.Question", "line_number": 19, "usage_type": "name"}, {"api_name": "polls.models.Question._meta", "line_number": 29, "usage_type": "attribute"}, {"api_name": "polls.models.Question", "line_number": 29, "usage_type": "name"}, {"api_name": "polls.models.Question", "line_number": 35, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", 
"line_number": 35, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 35, "usage_type": "name"}, {"api_name": "polls.models.Choice", "line_number": 37, "usage_type": "call"}, {"api_name": "polls.models.Choice", "line_number": 39, "usage_type": "call"}, {"api_name": "polls.models.Question", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 51, "usage_type": "name"}, {"api_name": "polls.models.Question", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 56, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 56, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 61, "usage_type": "name"}, {"api_name": "polls.models.Question", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 66, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 66, "usage_type": "name"}, {"api_name": "polls.models.Choice", "line_number": 70, "usage_type": "call"}, {"api_name": "polls.models.Choice", "line_number": 97, "usage_type": "call"}, {"api_name": "polls.models.Question", "line_number": 102, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 102, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 102, "usage_type": "name"}, {"api_name": "polls.models.Choice", "line_number": 104, "usage_type": "call"}, {"api_name": "polls.models.Choice", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "482537898", "text": "# cfg80211 test cases\n# Copyright (c) 2014, Jouni Malinen \n#\n# This software may be distributed under the terms of the BSD license.\n# See README for more details.\n\nfrom remotehost import remote_compatible\nimport logging\nlogger = logging.getLogger()\nimport binascii\nimport os\nimport time\n\nimport hostapd\nimport hwsim_utils\nfrom tshark import run_tshark\nfrom nl80211 import *\nfrom wpasupplicant import WpaSupplicant\n\ndef nl80211_command(dev, cmd, attr):\n res = dev.request(\"VENDOR ffffffff {} {}\".format(nl80211_cmd[cmd],\n binascii.hexlify(attr)))\n if \"FAIL\" in res:\n raise Exception(\"nl80211 command failed\")\n return binascii.unhexlify(res)\n\n@remote_compatible\ndef test_cfg80211_disassociate(dev, apdev):\n \"\"\"cfg80211 disassociation command\"\"\"\n hapd = hostapd.add_ap(apdev[0], { \"ssid\": \"open\" })\n dev[0].connect(\"open\", key_mgmt=\"NONE\", scan_freq=\"2412\")\n ev = hapd.wait_event([ \"AP-STA-CONNECTED\" ], timeout=5)\n if ev is None:\n raise Exception(\"No connection event received from hostapd\")\n\n ifindex = int(dev[0].get_driver_status_field(\"ifindex\"))\n attrs = build_nl80211_attr_u32('IFINDEX', ifindex)\n attrs += build_nl80211_attr_u16('REASON_CODE', 1)\n attrs += build_nl80211_attr_mac('MAC', apdev[0]['bssid'])\n nl80211_command(dev[0], 'DISASSOCIATE', attrs)\n\n ev = hapd.wait_event([ \"AP-STA-DISCONNECTED\" ], timeout=5)\n if ev is None:\n raise Exception(\"No disconnection event received from hostapd\")\n\ndef nl80211_frame(dev, ifindex, frame, freq=None, duration=None, offchannel_tx_ok=False):\n attrs = build_nl80211_attr_u32('IFINDEX', ifindex)\n if freq is not None:\n attrs += build_nl80211_attr_u32('WIPHY_FREQ', freq)\n if duration is not None:\n attrs += build_nl80211_attr_u32('DURATION', duration)\n if 
offchannel_tx_ok:\n attrs += build_nl80211_attr_flag('OFFCHANNEL_TX_OK')\n attrs += build_nl80211_attr('FRAME', frame)\n return parse_nl80211_attrs(nl80211_command(dev, 'FRAME', attrs))\n\ndef nl80211_frame_wait_cancel(dev, ifindex, cookie):\n attrs = build_nl80211_attr_u32('IFINDEX', ifindex)\n attrs += build_nl80211_attr('COOKIE', cookie)\n return nl80211_command(dev, 'FRAME_WAIT_CANCEL', attrs)\n\ndef nl80211_remain_on_channel(dev, ifindex, freq, duration):\n attrs = build_nl80211_attr_u32('IFINDEX', ifindex)\n attrs += build_nl80211_attr_u32('WIPHY_FREQ', freq)\n attrs += build_nl80211_attr_u32('DURATION', duration)\n return nl80211_command(dev, 'REMAIN_ON_CHANNEL', attrs)\n\ndef test_cfg80211_tx_frame(dev, apdev, params):\n \"\"\"cfg80211 offchannel TX frame command\"\"\"\n\n dev[0].p2p_start_go(freq='2412')\n go = WpaSupplicant(dev[0].group_ifname)\n frame = binascii.unhexlify(\"d0000000020000000100\" + go.own_addr().translate(None, ':') + \"02000000010000000409506f9a090001dd5e506f9a0902020025080401001f0502006414060500585804510b0906000200000000000b1000585804510b0102030405060708090a0b0d1d000200000000000108000000000000000000101100084465766963652041110500585804510bdd190050f204104a0001101012000200011049000600372a000120\")\n ifindex = int(go.get_driver_status_field(\"ifindex\"))\n res = nl80211_frame(go, ifindex, frame, freq=2422, duration=500,\n offchannel_tx_ok=True)\n time.sleep(0.1)\n\n # note: Uncommenting this seems to remove the incorrect channel issue\n #nl80211_frame_wait_cancel(dev[0], ifindex, res[nl80211_attr['COOKIE']])\n\n # note: this Action frame ends up getting sent incorrectly on 2422 MHz\n nl80211_frame(go, ifindex, frame, freq=2412)\n time.sleep(1.5)\n # note: also the Deauthenticate frame sent by the GO going down ends up\n # being transmitted incorrectly on 2422 MHz.\n\n del go\n\n out = run_tshark(os.path.join(params['logdir'], \"hwsim0.pcapng\"),\n \"wlan.fc.type_subtype == 13\", [\"radiotap.channel.freq\"])\n if out is not None:\n freq = out.splitlines()\n if len(freq) != 2:\n raise Exception(\"Unexpected number of Action frames (%d)\" % len(freq))\n if freq[0] != \"2422\":\n raise Exception(\"First Action frame on unexpected channel: %s MHz\" % freq[0])\n if freq[1] != \"2412\":\n raise Exception(\"Second Action frame on unexpected channel: %s MHz\" % freq[1])\n\n@remote_compatible\ndef test_cfg80211_wep_key_idx_change(dev, apdev):\n \"\"\"WEP Shared Key authentication and key index change without deauth\"\"\"\n hapd = hostapd.add_ap(apdev[0],\n { \"ssid\": \"wep-shared-key\",\n \"wep_key0\": '\"hello12345678\"',\n \"wep_key1\": '\"other12345678\"',\n \"auth_algs\": \"2\" })\n id = dev[0].connect(\"wep-shared-key\", key_mgmt=\"NONE\", auth_alg=\"SHARED\",\n wep_key0='\"hello12345678\"',\n wep_key1='\"other12345678\"',\n wep_tx_keyidx=\"0\",\n scan_freq=\"2412\")\n hwsim_utils.test_connectivity(dev[0], hapd)\n\n dev[0].set_network(id, \"wep_tx_keyidx\", \"1\")\n\n # clear cfg80211 auth state to allow new auth without deauth frame\n ifindex = int(dev[0].get_driver_status_field(\"ifindex\"))\n attrs = build_nl80211_attr_u32('IFINDEX', ifindex)\n attrs += build_nl80211_attr_u16('REASON_CODE', 1)\n attrs += build_nl80211_attr_mac('MAC', apdev[0]['bssid'])\n attrs += build_nl80211_attr_flag('LOCAL_STATE_CHANGE')\n nl80211_command(dev[0], 'DEAUTHENTICATE', attrs)\n dev[0].wait_disconnected(timeout=5, error=\"Local-deauth timed out\")\n\n # the previous command results in deauth event followed by auto-reconnect\n dev[0].wait_connected(timeout=10, error=\"Reassociation 
timed out\")\n hwsim_utils.test_connectivity(dev[0], hapd)\n\n@remote_compatible\ndef test_cfg80211_hostapd_ext_sta_remove(dev, apdev):\n \"\"\"cfg80211 DEL_STATION issued externally to hostapd\"\"\"\n hapd = hostapd.add_ap(apdev[0],\n { \"ssid\": \"open\" })\n id = dev[0].connect(\"open\", key_mgmt=\"NONE\", scan_freq=\"2412\")\n\n ifindex = int(hapd.get_driver_status_field(\"ifindex\"))\n attrs = build_nl80211_attr_u32('IFINDEX', ifindex)\n attrs += build_nl80211_attr_u16('REASON_CODE', 1)\n attrs += build_nl80211_attr_u8('MGMT_SUBTYPE', 12)\n attrs += build_nl80211_attr_mac('MAC', dev[0].own_addr())\n nl80211_command(hapd, 'DEL_STATION', attrs)\n\n # Currently, hostapd ignores the NL80211_CMD_DEL_STATION event if\n # drv->device_ap_sme == 0 (which is the case with mac80211_hwsim), so no\n # further action happens here. If that event were to be used to remove the\n # STA entry from hostapd even in device_ap_sme == 0 case, this test case\n # could be extended to cover additional operations.\n", "sub_path": "tests/hwsim/test_cfg80211.py", "file_name": "test_cfg80211.py", "file_ext": "py", "file_size_in_byte": 6729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "binascii.hexlify", "line_number": 22, "usage_type": "call"}, {"api_name": "binascii.unhexlify", "line_number": 25, "usage_type": "call"}, {"api_name": "hostapd.add_ap", "line_number": 30, "usage_type": "call"}, {"api_name": "remotehost.remote_compatible", "line_number": 27, "usage_type": "name"}, {"api_name": "wpasupplicant.WpaSupplicant", "line_number": 72, "usage_type": "call"}, {"api_name": "binascii.unhexlify", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 77, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "tshark.run_tshark", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "hostapd.add_ap", "line_number": 104, "usage_type": "call"}, {"api_name": "hwsim_utils.test_connectivity", "line_number": 114, "usage_type": "call"}, {"api_name": "hwsim_utils.test_connectivity", "line_number": 129, "usage_type": "call"}, {"api_name": "remotehost.remote_compatible", "line_number": 101, "usage_type": "name"}, {"api_name": "hostapd.add_ap", "line_number": 134, "usage_type": "call"}, {"api_name": "remotehost.remote_compatible", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "230403726", "text": "# conflicts with isort because of local non-relative import\n# pylint: disable=wrong-import-order\n\n\nimport logging\nimport sys\nfrom datetime import datetime, timedelta\n\nfrom dependencies import logger\nfrom models.tortoise_models import TaskRule\nfrom tortoise import Tortoise, run_async\n\nfrom task_scheduler.app_config import default_config\nfrom task_scheduler.models.tortoise_models.helpers.task_rule_definition import (\n FrequencyEnum,\n)\nfrom task_scheduler.models.tortoise_models.task_rule import TaskTypeEnum\n\nlogger = logging.getLogger(\"clean_script\")\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\n\nlogger.info(\"started data creation\")\n\n\nasync def create_test_data():\n now = datetime.utcnow()\n future = now + 
timedelta(days=2)\n\n await TaskRule.create(\n name=\"test65\",\n task_type=TaskTypeEnum.LOOP,\n frequency=1,\n frequency_type=FrequencyEnum.ONCE,\n first_day_to_apply_rule=now,\n start_datetime=now,\n )\n\n await TaskRule.create(\n name=\"test_minutely\",\n task_type=TaskTypeEnum.LOOP,\n frequency=60,\n frequency_type=FrequencyEnum.MINUTELY,\n first_day_to_apply_rule=now,\n start_datetime=now,\n end_datetime=future,\n )\n\n\nasync def run():\n await Tortoise.init(\n db_url=default_config.db_url,\n modules={\"models\": [\"task_scheduler.models.tortoise_models\"]},\n # generate_schemas=True,\n )\n # Tortoise.init_models([\"task_scheduler.models.tortoise_models\"], \"models\")\n\n await Tortoise.generate_schemas()\n await create_test_data()\n\n\ndef main():\n run_async(run())\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "packages/task-scheduler/task_scheduler/generate_test_data.py", "file_name": "generate_test_data.py", "file_ext": "py", "file_size_in_byte": 1759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "dependencies.logger", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 20, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.BASIC_FORMAT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "dependencies.logger.addHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "dependencies.logger", "line_number": 22, "usage_type": "name"}, {"api_name": "dependencies.logger.setLevel", "line_number": 23, "usage_type": "call"}, {"api_name": "dependencies.logger", "line_number": 23, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 23, "usage_type": "attribute"}, {"api_name": "dependencies.logger.info", "line_number": 26, "usage_type": "call"}, {"api_name": "dependencies.logger", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 31, "usage_type": "call"}, {"api_name": "models.tortoise_models.TaskRule.create", "line_number": 33, "usage_type": "call"}, {"api_name": "models.tortoise_models.TaskRule", "line_number": 33, "usage_type": "name"}, {"api_name": "task_scheduler.models.tortoise_models.task_rule.TaskTypeEnum.LOOP", "line_number": 35, "usage_type": "attribute"}, {"api_name": "task_scheduler.models.tortoise_models.task_rule.TaskTypeEnum", "line_number": 35, "usage_type": "name"}, {"api_name": "task_scheduler.models.tortoise_models.helpers.task_rule_definition.FrequencyEnum.ONCE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "task_scheduler.models.tortoise_models.helpers.task_rule_definition.FrequencyEnum", "line_number": 37, "usage_type": "name"}, {"api_name": "models.tortoise_models.TaskRule.create", "line_number": 42, "usage_type": "call"}, {"api_name": "models.tortoise_models.TaskRule", "line_number": 42, "usage_type": "name"}, {"api_name": "task_scheduler.models.tortoise_models.task_rule.TaskTypeEnum.LOOP", "line_number": 44, "usage_type": "attribute"}, {"api_name": "task_scheduler.models.tortoise_models.task_rule.TaskTypeEnum", "line_number": 44, "usage_type": "name"}, {"api_name": 
"task_scheduler.models.tortoise_models.helpers.task_rule_definition.FrequencyEnum.MINUTELY", "line_number": 46, "usage_type": "attribute"}, {"api_name": "task_scheduler.models.tortoise_models.helpers.task_rule_definition.FrequencyEnum", "line_number": 46, "usage_type": "name"}, {"api_name": "tortoise.Tortoise.init", "line_number": 54, "usage_type": "call"}, {"api_name": "tortoise.Tortoise", "line_number": 54, "usage_type": "name"}, {"api_name": "task_scheduler.app_config.default_config.db_url", "line_number": 55, "usage_type": "attribute"}, {"api_name": "task_scheduler.app_config.default_config", "line_number": 55, "usage_type": "name"}, {"api_name": "tortoise.Tortoise.generate_schemas", "line_number": 61, "usage_type": "call"}, {"api_name": "tortoise.Tortoise", "line_number": 61, "usage_type": "name"}, {"api_name": "tortoise.run_async", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "606917253", "text": "import pytest\nimport jinja2\nfrom scanapi.reporter import Reporter\n\nfake_responses = [\n {\"status_code\": 200, \"request\": {\"method\": \"GET\", \"url\": \"http://test.com\"}}\n]\n\n\nclass TestConsoleReport:\n @pytest.fixture\n def mocked_print(self, mocker):\n return mocker.patch(\"builtins.print\")\n\n def test_should_print(self, mocker, mocked_print):\n console_reporter = Reporter(None, \"console\")\n console_reporter.write(fake_responses)\n\n expected_content = \"\\n\".join(\n (\n \"ScanAPI Report: Console\",\n \"=======================\",\n \"\",\n \"GET http://test.com - 200\",\n \"\",\n )\n )\n\n mocked_print.assert_called_once_with(f\"\\n{expected_content}\")\n\n\nclass TestReporterOtherThanConsole:\n @pytest.fixture\n def mocked__render_content(self, mocker):\n return mocker.patch(\"scanapi.reporter.Reporter._render_content\")\n\n @pytest.fixture\n def mocked_open(self, mocker):\n mock = mocker.mock_open()\n mocker.patch(\"builtins.open\", mock)\n return mock\n\n @pytest.mark.parametrize(\"reporter_type\", [\"html\", \"markdown\"])\n def test_should_write_to_default_output(\n self, reporter_type, mocker, mocked__render_content, mocked_open\n ):\n mocked__render_content.return_value = \"ScanAPI Report\"\n reporter = Reporter(None, reporter_type)\n reporter.write(fake_responses)\n\n file_extension = {\"html\": \"html\", \"markdown\": \"md\"}[reporter_type]\n mocked_open.assert_called_once_with(\n f\"scanapi-report.{file_extension}\", \"w\", newline=\"\\n\"\n )\n mocked_open().write.assert_called_once_with(\"ScanAPI Report\")\n\n def test_should_write_to_custom_output(\n self, mocker, mocked__render_content, mocked_open\n ):\n mocked__render_content.return_value = \"ScanAPI Report\"\n reporter = Reporter(\"./custom/report-output.html\", \"html\")\n reporter.write(fake_responses)\n\n mocked_open.assert_called_once_with(\n \"./custom/report-output.html\", \"w\", newline=\"\\n\"\n )\n mocked_open().write.assert_called_once_with(\"ScanAPI Report\")\n\n def test_should_handle_custom_templates(\n self, mocker, mocked__render_content, mocked_open\n ):\n mocked__render_content.return_value = \"ScanAPI Report\"\n reporter = Reporter(None, None, \"my-template.html\")\n reporter.write(fake_responses)\n\n mocked_open.assert_called_once_with(\"scanapi-report\", \"w\", newline=\"\\n\")\n mocked_open().write.assert_called_once_with(\"ScanAPI Report\")\n", "sub_path": "tests/unit/test_reporter.py", "file_name": "test_reporter.py", "file_ext": "py", "file_size_in_byte": 2607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": 
[{"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "scanapi.reporter.Reporter", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 37, "usage_type": "attribute"}, {"api_name": "scanapi.reporter.Reporter", "line_number": 48, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 43, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 43, "usage_type": "attribute"}, {"api_name": "scanapi.reporter.Reporter", "line_number": 61, "usage_type": "call"}, {"api_name": "scanapi.reporter.Reporter", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "288682318", "text": "import json\n\n\nclass ExtractTranscript:\n def run(self, data):\n results = []\n for corpus in data:\n tokens = json.loads(corpus.contents)\n words = list(token['word'] for token in tokens if not token['filler'])\n corpus.contents = ' '.join(words)\n results.append(corpus)\n return results\n", "sub_path": "linguine/ops/extract_transcript.py", "file_name": "extract_transcript.py", "file_ext": "py", "file_size_in_byte": 349, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.loads", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "474393708", "text": "from datetime import datetime\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\nconfig = app.config.from_envvar('APP_SETTINGS')\n\nfrom application.models import db, Task\n\nwith app.app_context():\n db.init_app(app)\n db.create_all()\n\ndef get_task(task_id):\n return db.session.query(Task).filter_by(id=task_id).first_or_404()\n\n\n@app.route(\"/tasks\")\ndef tasks_list():\n tasks = db.session.query(Task).all()\n return jsonify(tasks=[i.serialize for i in tasks])\n\n@app.route(\"/tasks/\")\ndef task_by_id(task_id):\n task = get_task(task_id)\n return jsonify(task=task.serialize)\n\n@app.route(\"/tasks/post\", methods=['POST'])\ndef task_post():\n text = request.form.get('text')\n date = request.form.get('date')\n date_format = datetime.strptime(date, '%Y-%m-%d %H:%M')\n\n db.session.add(Task(text=text, date=date_format))\n db.session.commit()\n return jsonify()\n\n\nif __name__ == '__main__':\n\n\n app.run()\n", "sub_path": "application/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "application.models.db.init_app", "line_number": 10, "usage_type": "call"}, {"api_name": "application.models.db", "line_number": 10, "usage_type": "name"}, {"api_name": "application.models.db.create_all", "line_number": 11, "usage_type": "call"}, {"api_name": "application.models.db", "line_number": 11, "usage_type": "name"}, {"api_name": "application.models.db.session.query", "line_number": 14, "usage_type": "call"}, {"api_name": "application.models.Task", "line_number": 14, "usage_type": "argument"}, {"api_name": "application.models.db.session", "line_number": 14, "usage_type": "attribute"}, {"api_name": "application.models.db", "line_number": 14, "usage_type": "name"}, {"api_name": "application.models.db.session.query", "line_number": 19, "usage_type": "call"}, {"api_name": "application.models.Task", "line_number": 19, "usage_type": "argument"}, {"api_name": "application.models.db.session", 
"line_number": 19, "usage_type": "attribute"}, {"api_name": "application.models.db", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "application.models.db.session.add", "line_number": 33, "usage_type": "call"}, {"api_name": "application.models.db.session", "line_number": 33, "usage_type": "attribute"}, {"api_name": "application.models.db", "line_number": 33, "usage_type": "name"}, {"api_name": "application.models.Task", "line_number": 33, "usage_type": "call"}, {"api_name": "application.models.db.session.commit", "line_number": 34, "usage_type": "call"}, {"api_name": "application.models.db.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "application.models.db", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "89042232", "text": "from django.db import migrations\n\nfrom utils.enums import City, Department as EnumDepartament\n\n\ndef insert_employee_data(apps, schema_editor):\n Office = apps.get_model(\"office\", \"Office\")\n Departament = apps.get_model('office', 'Departament')\n\n Office.objects.create(\n city=City.moscow.name,\n name='Главный',\n address='117246, Научный проезд 8 стр.1, офис 431',\n phone='84996537156',\n email='sales@dz.ru',\n )\n Office.objects.create(\n city=City.spb.name,\n name='второй',\n address='197374, ул. Оптиков 4',\n phone='88122009509',\n email='hello@e-legion.com',\n )\n Office.objects.create(\n city=City.kazan.name,\n name='третий',\n address='420107, ул. Петербургская 50, к. 5, офис 404',\n phone='88435705407',\n email='kazan@dz.ru',\n )\n Office.objects.create(\n city=City.ufa.name,\n name='четвертый',\n address='450076, ул. Чернышевского 82, к. 6, бизнес-центр Капитал, офис 615',\n phone='84996537156',\n email='ufa@dz.ru',\n )\n Office.objects.create(\n city=City.kalinin.name,\n name='западный',\n address='236022, ул. 
Карла Маркса 59',\n phone='78122009509',\n email='hello@e-legion.com',\n )\n\n for dep in EnumDepartament:\n Departament.objects.create(name=dep.name)\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('office', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(insert_employee_data)\n ]\n", "sub_path": "office/migrations/0002_insert_office.py", "file_name": "0002_insert_office.py", "file_ext": "py", "file_size_in_byte": 1685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "utils.enums.City.moscow", "line_number": 11, "usage_type": "attribute"}, {"api_name": "utils.enums.City", "line_number": 11, "usage_type": "name"}, {"api_name": "utils.enums.City.spb", "line_number": 18, "usage_type": "attribute"}, {"api_name": "utils.enums.City", "line_number": 18, "usage_type": "name"}, {"api_name": "utils.enums.City.kazan", "line_number": 25, "usage_type": "attribute"}, {"api_name": "utils.enums.City", "line_number": 25, "usage_type": "name"}, {"api_name": "utils.enums.City.ufa", "line_number": 32, "usage_type": "attribute"}, {"api_name": "utils.enums.City", "line_number": 32, "usage_type": "name"}, {"api_name": "utils.enums.City.kalinin", "line_number": 39, "usage_type": "attribute"}, {"api_name": "utils.enums.City", "line_number": 39, "usage_type": "name"}, {"api_name": "utils.enums.Department", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.migrations.Migration", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.migrations.RunPython", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "101589702", "text": "import logging\r\nimport json\r\nimport sys\r\nimport os\r\n\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nfrom bot.orm.models import Base\r\n\r\n\r\ndatabase_url = os.environ.get('DATABASE_URL')\r\n\r\nif not database_url:\r\n try:\r\n with open('bot/bot_settings.json', 'r') as f:\r\n settings = json.load(f)\r\n database_url = settings['BOT']['database_url']\r\n except FileNotFoundError:\r\n print('No bot/bot_settings.json file found.')\r\n sys.exit(1)\r\n\r\nif database_url:\r\n engine = create_engine(database_url, pool_size=35, max_overflow=0)\r\nelse:\r\n engine = create_engine('sqlite:///db.sqlite3', connect_args={'timeout': 15})\r\n\r\nBase.metadata.create_all(bind=engine)\r\nSession = sessionmaker(bind=engine, autoflush=False)\r\n\r\nlogging.basicConfig()\r\nlogging.getLogger('sqlalchemy.engine').setLevel(logging.ERROR)\r\nlogging.getLogger('sqlalchemy.engine').addHandler(logging.FileHandler('db.log'))\r\n", "sub_path": "bot/orm/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 959, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 26, "usage_type": "call"}, {"api_name": "bot.orm.models.Base.metadata.create_all", "line_number": 28, "usage_type": "call"}, 
{"api_name": "bot.orm.models.Base.metadata", "line_number": 28, "usage_type": "attribute"}, {"api_name": "bot.orm.models.Base", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 32, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "255573700", "text": "# -*- coding:utf-8 -*-\r\nimport json\r\nimport time,datetime,traceback\r\nimport requests\r\nimport os\r\nfrom jsonpath import jsonpath\r\nfrom automation_test import conn_mysql\r\nfrom automation_test.api_test.login_information import Login_envirment\r\n\r\nclass Bidding():\r\n\r\n\tdef __init__(self, host):\r\n\t\tself.session = requests.Session()\r\n\t\tself.environment = host\r\n\r\n\tdef get_json_path(self, result, json_value):\r\n\t\t# 获取json的jsonpath\r\n\t\ttry:\r\n\t\t\tlist_paths = []\r\n\t\t\tjson_name = jsonPath.get_path(result, \"$\", json_value)\r\n\t\t\tprint(json_name[0])\r\n\t\texcept:\r\n\t\t\tprint(\"未获取到对于的jsonpath\")\r\n\r\n\tdef file_param(self, dict_key):\r\n\t\timg_path = f\"{os.path.dirname(os.path.abspath(__file__))}/123.jpg\"\r\n\t\twith open(f\"{img_path}\", \"rb\") as f:\r\n\t\t\timg = f.read()\r\n\t\tfile = {dict_key: (\"123.jpg\", img)}\r\n\t\treturn file\r\n\r\n\tdef send_request(self, method, url_1, headers, params={}, files={}, bytes={},status=\"form-data\"):\r\n\t\ttry:\r\n\t\t\turl = f\"{self.environment}{url_1}\"\r\n\t\t\tif method == \"get\":\r\n\t\t\t\tresponse = self.session.get(url, headers=headers[\"header\"], params=params, files=files)\r\n\r\n\t\t\telse:\r\n\t\t\t\tif status == \"form-data\":\r\n\t\t\t\t\tresponse = self.session.post(url, headers=headers[\"header\"], data=params, files=files)\r\n\t\t\t\telif status == \"special\":\r\n\t\t\t\t\tresponse = self.session.post(url, headers=headers[\"header\"], data=params, bytes=bytes)\r\n\t\t\t\telse:\r\n\t\t\t\t\tresponse = self.session.post(url, headers=headers[\"header\"], json=params, files=files)\r\n\r\n\r\n\t\t\tif response.json()['errcode']==0:\r\n\t\t\t\tprint(response.status_code, response.json())\r\n\t\t\t\tif files:\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t#\r\n\t\t\t# else:\r\n\t\t\t# \tresponse=self.send_request(method, url, headers, params={}, files={}, bytes={}, status=\"form-data\")\r\n\r\n\t\t\treturn response\r\n\t\texcept Exception as exc:\r\n\t\t\tprint(exc)\r\n\t\t\tprint(traceback.format_exc())\r\n\r\n\tdef get_headers(self, username):\r\n\t\t# 获取 headers\r\n\t\thost=self.environment\r\n\t\trr = Login_envirment(host)\r\n\t\theaders = rr.get_login_token(username)\r\n\t\treturn headers\r\n\r\n\tdef get_datatime(self):\r\n\t\tthreeDayAgo = datetime.datetime.today() - datetime.timedelta(2)\r\n\t\totherStyleTime = threeDayAgo.strftime(\"%m%d\")\r\n\t\treturn otherStyleTime\r\n\r\n\tdef get_mysql_name(self):\r\n\t\thost = '192.168.188.12'\r\n\t\tdb = ''\r\n\t\tsql_data = \"show databases\"\r\n\t\tbaocun = conn_mysql.Operation_mysql().get_information(sql_data, host, db)\r\n\t\tprint(baocun)\r\n\t\tfor i in range(8):\r\n\t\t\tthreeDayAgo = datetime.datetime.today() - datetime.timedelta(i)\r\n\t\t\totherStyleTime_1 = threeDayAgo.strftime(\"%Y%m%d\")\r\n\t\t\tmysql_name = f'epodb_{otherStyleTime_1}'\r\n\t\t\tif mysql_name in 
baocun:\r\n\t\t\t\tself.set_mysql_name(mysql_name)\r\n\r\n\tdef set_mysql_name(self,names):\r\n\t\tself.mysql_name=names\r\n\r\n\tdef get_login_name(self,member_id):\r\n\t\thost = '192.168.188.12'\r\n\t\tdata_db='epodb_20201108'\r\n\t\tsql=f'SELECT login_name from backend_member where `id`={member_id};'\r\n\t\tmysql_test=conn_mysql.Operation_mysql().get_information(sql, host, data_db)\r\n\t\t# print(mysql_test[0])\r\n\t\treturn mysql_test[0]\r\n\r\n\tdef set_bidding_id(self,bidding_id):\r\n\t\tself.bidding_id=bidding_id\r\n\r\n\tdef set_payrate_id(self,payrate_id):\r\n\t\tself.payrate_id=payrate_id\r\n\r\n\tdef set_catelog_id(self,catelog_id):\r\n\t\tself.catelog_id=catelog_id\r\n\r\n\t\"\"\"\"创建智能招标单\"\"\"\r\n\tdef crete_bidding(self,headers,catelog_id=622):\r\n\t\tself.set_catelog_id(catelog_id)\r\n\t\turl_1=f'/bk/qtd/issue/sheet/create/'\r\n\t\tresponse1=self.send_request('post',url_1,headers)\r\n\t\tself.set_bidding_id(response1.json()['data']['id'])\r\n\t\t# self.bidding_ids(134)\r\n\r\n\t\t\"\"\"填写招标信息\"\"\"\r\n\t\turl_2=f'/bk/qtd/issue/sheet/info/{self.bidding_id}'\r\n\t\tend_time=int(time.time())+700000\r\n\t\tbody={'title':f'测试数据{self.bidding_id}','project_name':'测试智能招标数据','end_time':end_time,'comment':'测试数据007'}\r\n\t\tresponse2=self.send_request('post',url_2,headers,params=body)\r\n\r\n\t\t\"\"\"上传招标图片\"\"\"\r\n\t\turl_3=f'/bk/qtd/issue/sheet/announcement/{self.bidding_id}?format=json'\r\n\t\tbody={'type':'announcement_file','action':'add'}\r\n\t\tfiles=self.file_param('attachment') # 图片:招标公告(盖章版)\r\n\t\tresponse3=self.send_request('post',url_3,headers,params=body,files=files)\r\n\r\n\t\tbody = {'type': 'invitation_file', 'action': 'add'} # 图片:招标邀请函(盖章版)\r\n\t\tresponse3 = self.send_request('post', url_3, headers, params=body, files=files)\r\n\r\n\t\tbody = {'type': 'bid_file', 'action': 'add'} # 图片:招标文件(盖章版)\r\n\t\tresponse3 = self.send_request('post', url_3, headers, params=body, files=files)\r\n\r\n\t\tbody = {'type': 'spec_file', 'action': 'add'} # 图片:招标技术标准\r\n\t\tresponse3 = self.send_request('post', url_3, headers, params=body, files=files)\r\n\r\n\t\tbody = {'type': 'other_file', 'action': 'add'} # 图片:其他附件\r\n\t\tresponse3 = self.send_request('post', url_3, headers, params=body, files=files)\r\n\t\tresponse3 = self.send_request('post', url_3, headers, params=body, files=files)\r\n\r\n\t\tbody = {'type': 'announcement_content', 'action': 'add','content':'
测试数据007
'} # 文本框:备注信息\r\n\t\tresponse3=self.send_request('post',url_3,headers,params=body)\r\n\r\n\t\t\"\"\"提交招标信息\"\"\"\r\n\t\turl_4=f'/bk/qtd/sheet/dostep/{self.bidding_id}'\r\n\t\tbody={'curr_step':'qtd_status_issue_announcement'}\r\n\t\tresponse4=self.send_request('post',url_4,headers,params=body)\r\n\r\n\t\t\"\"\"获取商品分类信息\"\"\"\r\n\t\turl_5=f'/bk/product_name/onsale/metas/{self.catelog_id}'\r\n\t\tresponse5=self.send_request('get',url_5,headers)\r\n\t\tdetail_1=jsonpath(response5.json(),'$.data.name_list[0].detail_list[0].detail_id')\r\n\t\tdetail_2=jsonpath(response5.json(),'$.data.spec_list[0].detail_list[0].detail_id')\r\n\t\tdetail_3=jsonpath(response5.json(),'$.data.spec_list[0].detail_list[1].detail_id')\r\n\r\n\t\t\"\"\"添加招标商品\"\"\"\r\n\t\turl_6=f'/bk/qtd/issue/sheet/inquiry/add/{self.bidding_id}?format=json'\r\n\t\tbody={'catelog_id':self.catelog_id,'meta_ids':[detail_1,detail_2,detail_3],'unit':'箱'}\r\n\t\tresponse6=self.send_request('post',url_6,headers,params=body)\r\n\t\ttime.sleep(3)\r\n\r\n\t\t\"\"\"提交发标评审\"\"\"\r\n\t\turl_7=f'/bk/qtd/sheet/dostep/{self.bidding_id}'\r\n\t\tbody={'curr_step':'qtd_status_issue_details'}\r\n\t\tresponse7=self.send_request('post',url_7,headers,params=body)\r\n\r\n\t\t\"\"\"获取发标评审人员名单\"\"\"\r\n\t\turl_8=f'/bk/qtd/issue/sheet/approval/{self.bidding_id}?format=json'\r\n\t\tresponse8=self.send_request('get',url_8,headers)\r\n\t\tresults=response8.json()['data']['flows'][0]['stages']\r\n\t\tmembers=[]\r\n\t\tfor result in results:\r\n\t\t\tfor i in result:\r\n\t\t\t\tif i['type']=='member':\r\n\t\t\t\t\tmembers.append({'name':i['title'],'id':i['id'],'approver_id':i['approver_id'],'level':i['level']})\r\n\t\tprint('华元素审批人员信息:',members)\r\n\t\treturn members\r\n\r\n\t\"\"\"华元素通过审批\"\"\"\r\n\tdef approve_bidding(self,member,condition):\r\n\t\t\"\"\"从数据库查询审批人员登录名\"\"\"\r\n\t\tlogin_name=self.get_login_name(member['id'])\r\n\t\theaders=self.get_headers(login_name)\r\n\r\n\t\t\"\"\"通过发标审批\"\"\"\r\n\t\tif condition=='发标':\r\n\t\t\turl=f'/bk/qtd/issue/sheet/approval/{self.bidding_id}?format=json'\r\n\t\telif condition=='回标':\r\n\t\t\turl=f'/bk/qtd/return/approval/{self.bidding_id}?format=json'\r\n\t\telif condition=='最低价':\r\n\t\t\turl=f'/bk/qtd/eval/price/lowest/approval/{self.bidding_id}?format=json'\r\n\t\telif condition=='评标':\r\n\t\t\turl=f'/bk/qtd/eval/tenders/approval/{self.bidding_id}?format=json'\r\n\t\telif condition=='定标':\r\n\t\t\turl=f'/bk/qtd/scaling/companys/approval/{self.bidding_id}?format=json'\r\n\t\tbody={'action':'approve','content':'同意','approver_id':member['approver_id']}\r\n\t\tresponse=self.send_request('post',url,headers,params=body)\r\n\r\n\t\"\"\"查看招标信息\"\"\"\r\n\tdef bindding_information(self,headers):\r\n\r\n\t\turl_1=f'/bk/qtd/issue/sheet/info/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\r\n\t\turl_2=f'/bk/qtd/issue/sheet/approval/{self.bidding_id}?format=json'\r\n\t\tresponse2=self.send_request('get',url_2,headers)\r\n\r\n\t\turl_3=f'/bk/qtd/issue/sheet/info/{self.bidding_id}?format=json'\r\n\t\tresponse3=self.send_request('get',url_3,headers)\r\n\r\n\t\"\"\"生产商提交资质评审\"\"\"\r\n\tdef submite_approve_for_bidding(self,headers):\r\n\t\t\"\"\"获取标书ID\"\"\"\r\n\t\turl_4=f'/bk/qtd/sheet/list/tender/?format=json&page=1&page_size=10&key=&status=all'\r\n\t\tresponse4=self.send_request('get',url_4,headers)\r\n\r\n\t\t# url_5=f'/bk/product_name/onsale/com/brands/{company_id}?format=json'\r\n\t\t# 
response5=self.send_request('get',url_5,headers)\r\n\r\n\t\t\"\"\"获取提交标书sheet_id\"\"\"\r\n\t\turl_1=f'/bk/qtd/tender/main/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\t\tsheet_id=response1.json()['data']['id']\r\n\r\n\t\t\"\"\"上传投标附件\"\"\"\r\n\t\turl_2=f'/bk/qtd/tender/authentication/{sheet_id}?format=json'\r\n\t\tbody={'type':'invitation_receipt_file','action':'add'}\r\n\t\tfiles=self.file_param('attachment') # 投标邀请函回执\r\n\t\tresponse2=self.send_request('post',url_2,headers,params=body,files=files)\r\n\r\n\t\tbody = {'type': 'commitment_file', 'action': 'add'}\r\n\t\tfiles = self.file_param('attachment') # 投标承诺函\r\n\t\tresponse2 = self.send_request('post', url_2, headers, params=body, files=files)\r\n\r\n\t\tbody = {'type': 'spec_response_file', 'action': 'add'}\r\n\t\tfiles = self.file_param('attachment') # 技术标准响应表\r\n\t\tresponse2 = self.send_request('post', url_2, headers, params=body, files=files)\r\n\r\n\t\tbody = {'type': 'other_file', 'action': 'add'}\r\n\t\tfiles = self.file_param('attachment') # 资质文件\r\n\t\tresponse2 = self.send_request('post', url_2, headers, params=body, files=files)\r\n\r\n\t\t\"\"\"提交资质评审\"\"\"\r\n\t\turl_3=f'/bk/qtd/tender/authentication/{sheet_id}?format=json'\r\n\t\tbody={'type':'submit','action':'modify'}\r\n\t\tresponse3=self.send_request('post',url_3,headers,params=body)\r\n\r\n\t\"\"\"华元素提交厂家资质评审\"\"\"\r\n\tdef approve_author(self,headers):\r\n\t\t\"\"\"获取厂家资质ID\"\"\"\r\n\t\turl_1=f'/bk/qtd/return/auth/tenders/{self.bidding_id}?page=1&page_size=999&format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\t\ttender_id_1=response1.json()['data'][0]['id']\r\n\t\ttender_id_2=response1.json()['data'][1]['id']\r\n\r\n\t\t\"\"\"招标人通过资质评审\"\"\"\r\n\t\turl_2=f'/bk/qtd/return/auth/tender/{tender_id_1}?format=json'\r\n\t\tbody={'action':'approve','content':'测试数据007'}\r\n\t\tresponse2=self.send_request('post',url_2,headers,params=body)\r\n\t\ttender_ids_1=[]\r\n\t\tfor tender_id in response2.json()['data']['stages']:\r\n\t\t\t\tfor i in tender_id:\r\n\t\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\t\ttender_ids_1.append({'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']})\r\n\r\n\t\turl_3 = f'/bk/qtd/return/auth/tender/{tender_id_2}?format=json'\r\n\t\tbody = {'action': 'approve', 'content': '测试数据007'}\r\n\t\tresponse3 = self.send_request('post', url_3, headers, params=body)\r\n\t\ttender_ids_2 = []\r\n\t\tfor tender_id in response3.json()['data']['stages']:\r\n\t\t\tfor i in tender_id:\r\n\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\ttender_ids_2.append({'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']})\r\n\t\ttender_ids={tender_id_1:tender_ids_1,tender_id_2:tender_ids_2}\r\n\t\tprint(tender_ids)\r\n\t\treturn tender_ids\r\n\r\n\t\"\"\"华元素审批厂家资质\"\"\"\r\n\tdef appove_for_tender(self,tender_informations):\r\n\t\tfor tender_information in tender_informations:\r\n\t\t\ttender_id=tender_information\r\n\t\t\tfor member in tender_informations[tender_information]:\r\n\t\t\t\tmember_id=member['id']\r\n\t\t\t\tuser=self.get_login_name(member_id)\r\n\t\t\t\theaders=self.get_headers(user)\r\n\t\t\t\tapprove_id=member['approver_id']\r\n\t\t\t\turl=f'/bk/qtd/return/auth/tender/{tender_id}?format=json'\r\n\t\t\t\tbody={'action':'approve','content':'测试数据007','approver_id':approve_id}\r\n\t\t\t\tresponse=self.send_request('post',url,headers,params=body)\r\n\r\n\t\"\"\"厂家投标\"\"\"\r\n\tdef 
bidding_product(self,headers):\r\n\t\t\"\"\"获取投标ID\"\"\"\r\n\t\turl_1=f'/bk/qtd/tender/main/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\t\ttender_id=response1.json()['data']['id']\r\n\r\n\t\t\"\"\"查看资质评审状态\"\"\"\r\n\t\turl_13 = f'/bk/qtd/tender/auth/confirm/{tender_id}?format=json'\r\n\t\tbody = {'action': 'confirm'}\r\n\t\tresponse13 = self.send_request('post', url_13, headers, params=body)\r\n\r\n\t\t\"\"\"获取投标产品报价ID\"\"\"\r\n\t\turl_2=f'/bk/qtd/tender/sales/prices/{tender_id}?page=1&page_size=1&format=json'\r\n\t\tresponse2=self.send_request('get',url_2,headers)\r\n\t\tproduct_ids=jsonpath(response2.json(),'$.data.quotations[0][0]')\r\n\t\tquotation_ids=[]\r\n\t\tfor i in product_ids:\r\n\t\t\tfor j in i:\r\n\t\t\t\tquotation_ids.append(i[j]['quotation_id'])\r\n\r\n\t\t\"\"\"生产商上传附件\"\"\"\r\n\t\turl_2_1=f'/bk/qtd/tender/files/'\r\n\t\tbody={'tender_id':tender_id,'action':'add'}\r\n\t\tfiles = self.file_param('file')\r\n\t\tfor i in range(2):\r\n\t\t\tresponse2 = self.send_request('post', url_2_1, headers,params=body,files=files)\r\n\r\n\t\t\"\"\"设置产品价格\"\"\"\r\n\t\turl_3=f'/bk/qtd/tender/sales/prices/{tender_id}?format=json'\r\n\t\tbody_1={'action':'modify','market_price':200,'quotation_ids':quotation_ids[0]}\r\n\t\tbody_2={'action':'modify','market_price':300,'quotation_ids':quotation_ids[1]}\r\n\t\tbody_3={'action':'modify','discount':90,'quotation_ids':quotation_ids[0]}\r\n\t\tbody_4={'action':'modify','discount':90,'quotation_ids':quotation_ids[1]}\r\n\t\tresponse3=self.send_request('post',url_3,headers,params=body_1)\r\n\t\tresponse3=self.send_request('post',url_3,headers,params=body_2)\r\n\t\tresponse3=self.send_request('post',url_3,headers,params=body_3)\r\n\t\tresponse3=self.send_request('post',url_3,headers,params=body_4)\r\n\r\n\t\t\"\"\"获取投标产品销售单元ID\"\"\"\r\n\t\turl_3= f'/bk/qtd/tender/sales/skumoq/{tender_id}?page=1&page_size=1&format=json'\r\n\t\tresponse3 = self.send_request('get', url_3, headers)\r\n\t\tproduct_ids = response3.json()['data']['sku_moq']\r\n\t\tsale_ids = []\r\n\t\tfor i in product_ids:\r\n\t\t\tsale_ids.append(product_ids[i]['sale_id'])\r\n\r\n\t\t\"\"\"设置投标产品销售单元\"\"\"\r\n\t\turl_4 = f'/bk/qtd/tender/sales/skumoq/{tender_id}?format=json'\r\n\t\tbody_1 = {'action': 'modify', 'moq': 10, 'sale_ids': sale_ids[0]}\r\n\t\tbody_2 = {'action': 'modify', 'moq': 10, 'sale_ids': sale_ids[1]}\r\n\t\tbody_3 = {'action': 'modify', 'sku': 1, 'sale_ids': sale_ids[0]}\r\n\t\tbody_4 = {'action': 'modify', 'sku': 1, 'sale_ids': sale_ids[1]}\r\n\t\tresponse3 = self.send_request('post', url_4, headers, params=body_1)\r\n\t\tresponse3 = self.send_request('post', url_4, headers, params=body_2)\r\n\t\tresponse3 = self.send_request('post', url_4, headers, params=body_3)\r\n\t\tresponse3 = self.send_request('post', url_4, headers, 
params=body_4)\r\n\r\n\t\t\"\"\"获取投标产品运费模板ID\"\"\"\r\n\t\turl_5=f'/bk/product_name/onsale/com/transports/?format=json'\r\n\t\tresponse5=self.send_request('get',url_5,headers)\r\n\t\ttransport_id=response5.json()['data'][0]['transport_template_id']\r\n\r\n\t\t\"\"\"设置投标产品运费模板\"\"\"\r\n\t\turl_6=f'/bk/qtd/tender/sales/transport/{tender_id}?format=json'\r\n\t\tbody_1={'action':'modify','sale_ids':sale_ids[0],'transport_id':transport_id}\r\n\t\tbody_2={'action':'modify','sale_ids':sale_ids[1],'transport_id':transport_id}\r\n\t\tresponse6=self.send_request('post',url_6,headers,params=body_1)\r\n\t\tresponse6=self.send_request('post',url_6,headers,params=body_2)\r\n\r\n\t\t\"\"\"设置投标产品图片\"\"\"\r\n\t\turl_7=f'/bk/qtd/tender/sales/images/{tender_id}?format=json'\r\n\t\tfiles=self.file_param('image')\r\n\t\tbody_1={'action':'add','sale_ids':sale_ids[0]}\r\n\t\tbody_2={'action':'add','sale_ids':sale_ids[1]}\r\n\t\tresponse7=self.send_request('post',url_7,headers,params=body_1,files=files)\r\n\t\tresponse7=self.send_request('post',url_7,headers,params=body_2,files=files)\r\n\r\n\t\t\"\"\"获取商品详情模板ID\"\"\"\r\n\t\ttry:\r\n\t\t\turl_8=f'/bk/product_name/onsale/sale/template/details/list/{self.catelog_id}?format=json'\r\n\t\t\tresponse8=self.send_request('get',url_8,headers)\r\n\t\t\ttemplate_id=response8.json()['data'][0]['id']\r\n\t\texcept:\r\n\t\t\turl_11=f'/bk/product_name/onsale/sale/template/details/create/{self.catelog_id}?format=json'\r\n\t\t\tbody={'format':'json'}\r\n\t\t\tresponse11=self.send_request('post',url_11,headers,params=body)\r\n\t\t\ttemplate_id=response11.json()['data']['id']\r\n\r\n\t\t\turl_12=f'/bk/product_name/onsale/sale/template/details/edit/{template_id}'\r\n\t\t\tbody= {'type': 'image','action': 'add'}\r\n\t\t\tfiles=self.file_param('image')\r\n\t\t\tresponse12=self.send_request('post',url_12,headers,params=body,files=files)\r\n\r\n\t\t\"\"\"设置商品详情模板\"\"\"\r\n\t\turl_9=f'/bk/qtd/tender/sales/details/{tender_id}?format=json'\r\n\t\tbody={'template_id':template_id,'action':'add','catelog_id':{self.catelog_id}}\r\n\t\tresponse9=self.send_request('post',url_9,headers,params=body)\r\n\t\ttime.sleep(2)\r\n\r\n\t\t\"\"\"提交投标\"\"\"\r\n\t\turl_10=f'/bk/qtd/tender/quotations/sumbit/{tender_id}?format=json'\r\n\t\tbody={'action':'submit'}\r\n\t\tresponse10=self.send_request('post',url_10,headers,params=body)\r\n\r\n\t\"\"\"华元素发起回标审批\"\"\"\r\n\tdef back_bidding(self,headers):\r\n\t\t\"\"\"获取投标信息\"\"\"\r\n\t\turl_1=f'/bk/qtd/return/tenders/round/{self.bidding_id}?page=1&page_size=999&format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\r\n\t\t\"\"\"发起回标审批\"\"\"\r\n\t\turl_2=f'/bk/qtd/return/tenders/{self.bidding_id}?format=json'\r\n\t\tbody={'comment':'测试数据007','action':'start_eval'}\r\n\t\tresponse2=self.send_request('post',url_2,headers,params=body)\r\n\r\n\t\t\"\"\"获取回标审批人员\"\"\"\r\n\t\turl_3=f'/bk/qtd/return/approval/{self.bidding_id}?format=json'\r\n\t\tresponse3=self.send_request('get',url_3,headers)\r\n\t\ttender_ids_1 = []\r\n\t\tfor tender_id in response3.json()['data']['flows'][0]['stages']:\r\n\t\t\tfor i in tender_id:\r\n\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\ttender_ids_1.append({'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']})\r\n\t\treturn tender_ids_1\r\n\r\n\t\"\"\"提交最低价审批\"\"\"\r\n\tdef 
min_price_approve(self,headers):\r\n\t\t\"\"\"触发评标\"\"\"\r\n\t\turl_4=f'/bk/qtd/return/approval/{self.bidding_id}?format=json'\r\n\t\tresponse4=self.send_request('get',url_4,headers)\r\n\r\n\t\t\"\"\"选择最低价\"\"\"\r\n\t\turl_3=f'/bk/qtd/eval/price/lowest/{self.bidding_id}?page=1&page_size=1&format=json'\r\n\t\tresponse3=self.send_request('get',url_3,headers)\r\n\r\n\t\t\"\"\"提交最低价审批\"\"\"\r\n\t\turl_1=f'/bk/qtd/eval/price/lowest/approval/{self.bidding_id}?format=json'\r\n\t\tbody={'action':'start','content':'测试数据007'}\r\n\t\tresponse1=self.send_request('post',url_1,headers,params=body)\r\n\r\n\t\t\"\"\"获取最低价审批成员\"\"\"\r\n\t\turl_2=f'/bk/qtd/eval/price/lowest/approval/{self.bidding_id}?format=json'\r\n\t\tresponse2=self.send_request('get',url_2,headers)\r\n\t\ttender_ids_1 = []\r\n\t\tfor tender_id in response2.json()['data']['flows'][0]['stages']:\r\n\t\t\tfor i in tender_id:\r\n\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\ttender_ids_1.append(\r\n\t\t\t\t\t\t{'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']})\r\n\t\treturn tender_ids_1\r\n\r\n\t\"\"\"提交评标审批\"\"\"\r\n\tdef evaluate_bidding(self,headers):\r\n\t\t\"\"\"触发评标\"\"\"\r\n\t\turl_3=f'/bk/qtd/eval/price/lowest/approval/{self.bidding_id}?format=json'\r\n\t\tresponse3=self.send_request('get',url_3,headers)\r\n\r\n\t\t\"\"\"获取议标ID\"\"\"\r\n\t\turl_4=f'/bk/qtd/eval/tenders/{self.bidding_id}?format=json&page_size=999&page=1'\r\n\t\tresponse4=self.send_request('get',url_4,headers)\r\n\t\tsheet_id_1=response4.json()['data'][0]['id']\r\n\t\tsheet_id_2=response4.json()['data'][1]['id']\r\n\r\n\t\t\"\"\"选择议标类型\"\"\"\r\n\t\turl_5=f'/bk/qtd/eval/tender/content/{sheet_id_1}?format=json'\r\n\t\tresponse5=self.send_request('get',url_5,headers)\r\n\r\n\t\turl_5 = f'/bk/qtd/eval/tender/content/{sheet_id_2}?format=json'\r\n\t\tresponse5 = self.send_request('get', url_5, headers)\r\n\r\n\t\t\"\"\"提交评标审批\"\"\"\r\n\t\turl_1=f'/bk/qtd/eval/tenders/approval/{self.bidding_id}?format=json'\r\n\t\tbody = {'action': 'start', 'content': '测试数据007'}\r\n\t\tresponse1=self.send_request('post',url_1,headers,params=body)\r\n\r\n\t\t\"\"\"获取评标成员\"\"\"\r\n\t\turl_2=f'/bk/qtd/eval/tenders/approval/{self.bidding_id}?format=json'\r\n\t\tresponse2 = self.send_request('get', url_2, headers)\r\n\t\ttender_ids_1 = []\r\n\t\tfor tender_id in response2.json()['data']['flows'][0]['stages']:\r\n\t\t\tfor i in tender_id:\r\n\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\ttender_ids_1.append({'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']})\r\n\t\treturn tender_ids_1\r\n\r\n\t\"\"\"提交议标审批\"\"\"\r\n\tdef discuss_bidding(self,headers):\r\n\t\t\"\"\"获取标书信息\"\"\"\r\n\t\turl_1=f'/bk/qtd/eval/tenders/approval/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\r\n\t\t\"\"\"触发议标\"\"\"\r\n\t\turl_6 = f'/bk/qtd/discuss/progress/{self.bidding_id}?format=json'\r\n\t\tresponse6=self.send_request('get',url_6,headers)\r\n\r\n\t\t\"\"\"查看议标进度\"\"\"\r\n\t\turl_7 = f'/bk/qtd/discuss/negotiation/{self.bidding_id}?format=json'\r\n\t\tresponse2 = self.send_request('get', url_7, 
headers)\r\n\r\n\t\t\"\"\"获取议标ID\"\"\"\r\n\t\turl_2=f'/bk/qtd/discuss/negotiation/{self.bidding_id}?format=json'\r\n\t\tresponse2=self.send_request('get',url_2,headers)\r\n\t\tsheet_id_1=response2.json()['data'][0]['id']\r\n\t\tsheet_id_2=response2.json()['data'][1]['id']\r\n\r\n\t\t\"\"\"获取付款比例ID\"\"\"\r\n\t\turl_3=f'/orderinfo/circle/circle_payrate/?page=1&key='\r\n\t\tresponse3=self.send_request('get',url_3,headers)\r\n\t\tpayrate_id=response3.json()['data']['payrates'][0]['id']\r\n\t\tself.set_payrate_id(payrate_id)\r\n\r\n\t\t\"\"\"选择付款比例\"\"\"\r\n\t\turl_4=f'/bk/qtd/discuss/negotiation/tender/payrate/{sheet_id_1}?format=json'\r\n\t\tbody={'action':'add','catelog_id':self.catelog_id,'scaling_payrate_id':'','pay_rate_id':self.payrate_id}\r\n\t\tresponse4 = self.send_request('post', url_4, headers, params=body)\r\n\r\n\t\turl_4 = f'/bk/qtd/discuss/negotiation/tender/payrate/{sheet_id_2}?format=json'\r\n\t\tbody = {'action': 'add', 'catelog_id': self.catelog_id, 'scaling_payrate_id': '', 'pay_rate_id': payrate_id}\r\n\t\tresponse4 = self.send_request('post', url_4, headers, params=body)\r\n\r\n\t\t\"\"\"上传议标记录\"\"\"\r\n\t\turl_5=f'/bk/qtd/discuss/negotiation/tender/records/{sheet_id_1}?format=json'\r\n\t\tbody={'action':'add'}\r\n\t\tfiles=self.file_param('discuss_record')\r\n\t\tresponse5=self.send_request('post',url_5,headers,params=body,files=files)\r\n\r\n\t\turl_5 = f'/bk/qtd/discuss/negotiation/tender/records/{sheet_id_2}?format=json'\r\n\t\tbody = {'action': 'add'}\r\n\t\tfiles = self.file_param('discuss_record')\r\n\t\tresponse5 = self.send_request('post', url_5, headers, params=body, files=files)\r\n\r\n\t\"\"\"厂家调价\"\"\"\r\n\tdef change_price(self,headers):\r\n\t\t\"\"\"获取调价ID\"\"\"\r\n\t\turl_1=f'/bk/qtd/tender/main/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\t\tsheet_id=response1.json()['data']['id']\r\n\r\n\t\t\"\"\"厂家完成调价\"\"\"\r\n\t\turl_2=f'/bk/qtd/tender/quotations/sumbit/{sheet_id}?format=json'\r\n\t\tbody={'action':'submit'}\r\n\t\tresponse2=self.send_request('post',url_2,headers,params=body)\r\n\r\n\t\"\"\"调价完成,提交审批\"\"\"\r\n\tdef compare_change_price(self,headers):\r\n\t\t\"\"\"获取调价ID\"\"\"\r\n\t\turl_1=f'/bk/qtd/discuss/negotiation/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\t\tsheet_id_1=response1.json()['data'][0]['id']\r\n\t\tsheet_id_2=response1.json()['data'][1]['id']\r\n\r\n\t\t\"\"\"调价结束\"\"\"\r\n\t\turl_2=f'/bk/qtd/discuss/negotiation/tender/approval/{sheet_id_1}?format=json'\r\n\t\tbody={'action':'approve','content':'测试数据007'}\r\n\t\tresponse2=self.send_request('post',url_2,headers,params=body)\r\n\r\n\t\turl_3 = f'/bk/qtd/discuss/negotiation/tender/approval/{sheet_id_2}?format=json'\r\n\t\tbody = {'action': 'approve', 'content': '测试数据007'}\r\n\t\tresponse3 = self.send_request('post', url_3, headers, params=body)\r\n\r\n\t\t\"\"\"获取审批成员\"\"\"\r\n\t\turl_4=f'/bk/qtd/discuss/negotiation/scaling/info/{sheet_id_1}?format=json'\r\n\t\tresponse4=self.send_request('get',url_4,headers)\r\n\t\tmembers={}\r\n\t\tfor tender_id in response4.json()['data']['flows'][0]['stages']:\r\n\t\t\tfor i in tender_id:\r\n\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\tmembers[sheet_id_1]={'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']}\r\n\r\n\t\turl_5 = f'/bk/qtd/discuss/negotiation/scaling/info/{sheet_id_2}?format=json'\r\n\t\tresponse5 = self.send_request('get', url_5, headers)\r\n\t\tfor tender_id in 
response5.json()['data']['flows'][0]['stages']:\r\n\t\t\tfor i in tender_id:\r\n\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\tmembers[sheet_id_2] = {'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']}\r\n\r\n\t\tprint(members)\r\n\t\treturn members\r\n\r\n\t\"\"\"调价审批\"\"\"\r\n\tdef prove_change_bidding(self,members):\r\n\t\tfor member in members:\r\n\t\t\tlogin_name=self.get_login_name(members[member]['id'])\r\n\t\t\theaders=self.get_headers(login_name)\r\n\r\n\t\t\t\"\"\"通过调价审批\"\"\"\r\n\t\t\turl=f'/bk/qtd/discuss/negotiation/tender/approval/{member}?format=json'\r\n\t\t\tbody={'action':'approve','content':'同意','approver_id':members[member]['approver_id']}\r\n\t\t\tresponse=self.send_request('post',url,headers,params=body)\r\n\r\n\t\"\"\"定标\"\"\"\r\n\tdef sure_bidding(self,headers):\r\n\t\t\"\"\"触发定标\"\"\"\r\n\t\turl_1=f'/bk/qtd/issue/sheet/info/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\r\n\t\t\"\"\"获取定标ID\"\"\"\r\n\t\turl_1 = f'/bk/qtd/discuss/negotiation/{self.bidding_id}?format=json'\r\n\t\tresponse1 = self.send_request('get', url_1, headers)\r\n\t\tsheet_id_1 = response1.json()['data'][0]['id']\r\n\t\tsheet_id_2 = response1.json()['data'][1]['id']\r\n\r\n\t\t\"\"\"分批定标\"\"\"\r\n\t\turl_2=f'/bk/qtd/scaling/companys/progress/{self.bidding_id}?format=json'\r\n\t\tbody={'content':'测试数据007','scaling_ids':[sheet_id_1,sheet_id_2]}\r\n\t\tfiles=self.file_param('attach')\r\n\t\tresponse2=self.send_request('post',url_2,headers,params=body,files=files)\r\n\r\n\t\t\"\"\"获取定标审批成员\"\"\"\r\n\t\turl_3=f'/bk/qtd/scaling/companys/approval/{self.bidding_id}?format=json'\r\n\t\tresponse3=self.send_request('get',url_3,headers)\r\n\t\tmembers=[]\r\n\t\tfor tender_id in response3.json()['data']['flows'][0]['stages']:\r\n\t\t\tfor i in tender_id:\r\n\t\t\t\tif i['type'] == 'member':\r\n\t\t\t\t\tmembers.append({'name': i['title'], 'id': i['id'], 'approver_id': i['approver_id'], 'level': i['level']})\r\n\t\treturn members\r\n\r\n\t\"\"\"签约\"\"\"\r\n\tdef sign_bidding(self,headers):\r\n\t\t\"\"\"触发签约\"\"\"\r\n\t\turl_1=f'/bk/qtd/scaling/companys/approval/{self.bidding_id}?format=json'\r\n\t\tresponse1=self.send_request('get',url_1,headers)\r\n\r\n\t\t\"\"\"获取签约公司\"\"\"\r\n\t\turl_2=f'/bk/qtd/scaling/sales/sales/{self.bidding_id}?format=json'\r\n\t\tresponse2=self.send_request('get',url_2,headers)\r\n\t\tcompany_ids=[response2.json()['data'][0]['id'],response2.json()['data'][1]['id']]\r\n\r\n\t\tfor company_id in company_ids:\r\n\r\n\t\t\t\"\"\"获取签约商品ID\"\"\"\r\n\t\t\turl_3=f'/bk/qtd/scaling/sales/tender/sales/{company_id}?format=json&page=1&page_size=9999&payment_id='\r\n\t\t\tresponse3=self.send_request('get',url_3,headers)\r\n\t\t\tsale_id_1=response3.json()['data'][0]['id']\r\n\t\t\tsale_id_2=response3.json()['data'][1]['id']\r\n\r\n\t\t\t\"\"\"签约\"\"\"\r\n\t\t\turl_4=f'/bk/qtd/scaling/sales/tender/sales/{company_id}?format=json'\r\n\t\t\tbody={'action':'scaling','quotation_ids':[sale_id_1,sale_id_2]}\r\n\t\t\tresponse4=self.send_request('post',url_4,headers,params=body)\r\n\r\n\t\t\t\"\"\"上传战略合作协议\"\"\"\r\n\t\t\turl_5=f'/bk/qtd/contract/com/contracts/{company_id}?format=json'\r\n\t\t\tbody={'action':'add','type':'contract_strategy'}\r\n\t\t\tfiles=self.file_param('contract')\r\n\t\t\tresponse5=self.send_request('post',url_5,headers,params=body,files=files)\r\n\r\n\t\t\t\"\"\"补充协议\"\"\"\r\n\t\t\tbody = {'action':'add', 'type':'contract_replenish','payment_id':self.payrate_id}\r\n\t\t\tresponse5 = 
self.send_request('post', url_5, headers, params=body, files=files)\r\n\r\n\t\t\t\"\"\"商品上架\"\"\"\r\n\t\t\turl_5=f'/bk/qtd/scaling/sales/tender/onsale/{company_id}?format=json'\r\n\t\t\tbody={'action':'onsale','quotation_ids':[sale_id_1,sale_id_2]}\r\n\t\t\tresponse5 = self.send_request('post', url_5, headers, params=body)\r\n\r\n\tdef main_bidding(self):\r\n\t\tchangshang_bidding_cookies = self.get_headers('SKS123')\r\n\t\tself.changshang_cookies=self.get_headers('caoyuehua')\r\n\t\tself.huayuansu_cookies=self.get_headers('duanye')\r\n\t\tscs_company1='21'\r\n\t\tscs_company2='86'\r\n\r\n\t\theaders = self.huayuansu_cookies\r\n\t\tmembers = self.crete_bidding(headers)\r\n\t\tfor member in members:\r\n\t\t\tself.approve_bidding(member, '发标')\r\n\t\tself.bindding_information(headers)\r\n\t\theaders = self.changshang_cookies\r\n\t\tself.submite_approve_for_bidding(headers)\r\n\t\theaders = changshang_bidding_cookies\r\n\t\tself.submite_approve_for_bidding(headers)\r\n\t\theaders = self.huayuansu_cookies\r\n\t\ttender_informations = self.approve_author(headers)\r\n\t\tself.appove_for_tender(tender_informations)\r\n\t\theaders = self.changshang_cookies\r\n\t\tself.bidding_product(headers)\r\n\t\theaders = changshang_bidding_cookies\r\n\t\tself.bidding_product(headers)\r\n\t\theaders = self.huayuansu_cookies\r\n\t\tmembers = self.back_bidding(headers)\r\n\t\tfor member in members:\r\n\t\t\tself.approve_bidding(member, '回标')\r\n\t\tmembers = self.min_price_approve(headers)\r\n\t\tfor member in members:\r\n\t\t\tself.approve_bidding(member, '最低价')\r\n\t\tmembers = self.evaluate_bidding(headers)\r\n\t\tfor member in members:\r\n\t\t\tself.approve_bidding(member, '评标')\r\n\t\tself.discuss_bidding(headers)\r\n\t\theaders = changshang_bidding_cookies\r\n\t\tself.change_price(headers)\r\n\t\theaders = self.changshang_cookies\r\n\t\tself.change_price(headers)\r\n\t\theaders = self.huayuansu_cookies\r\n\t\tmembers = self.compare_change_price(headers)\r\n\t\tself.prove_change_bidding(members)\r\n\t\tmembers = self.sure_bidding(headers)\r\n\t\tfor member in members:\r\n\t\t\tself.approve_bidding(member, '定标')\r\n\t\tself.sign_bidding(headers)\r\n\r\nif __name__ == '__main__':\r\n\thost='http://dev.echronos.com:10460'\r\n\ttest=Bidding(host)\r\n\t# headers=test.get_headers('13608141521')\r\n\t# test.submite_approve_for_bidding(headers)\r\n\tusers = {\r\n\t\t'shigongfang': {'fuzheren': 'caoyuehua', 'account_type': 5},\r\n\t\t'hualifang': {'fuzheren': 'duanye', 'caiwu': 'wangjialehys1', 'dingdan': 'chengzhefeng', 'shenpi': 'monica',\r\n\t\t\t\t\t 'account_type': 1},\r\n\t\t'qudaoshang': {'fuzheren': 'houdong', 'account_type': 4},\r\n\t\t'huayuansu': {'fuzheren': 'duanye', 'caiwu': 'wangjialehys1', 'dingdan': 'wangjialehys1', 'shenpi': 'monica',\r\n\t\t\t\t\t 'account_type': 2},\r\n\t\t'changshang': {'fuzheren': 'nuobeier', 'other': 'SKS123', 'account_type': 3}\r\n\t}\r\n\ttest.main_bidding()\r\n\t# headers=test.get_headers(users['huayuansu']['fuzheren'])\r\n\t# test.bidding_product(headers)\r\n\t# test.evaluate_bidding(headers)\r\n\t# test.discuss_bidding(headers)\r\n\t# test.compare_change_price(headers)\r\n\t# test.discuss_bidding(headers)\r\n\t# test.get_mysql_name()", "sub_path": "demo/automation_test/api_test/bidding.py", "file_name": "bidding.py", "file_ext": "py", "file_size_in_byte": 29930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "requests.Session", "line_number": 13, "usage_type": "call"}, {"api_name": 
"os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 58, "usage_type": "call"}, {"api_name": "automation_test.api_test.login_information.Login_envirment", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 68, "usage_type": "call"}, {"api_name": "automation_test.conn_mysql.Operation_mysql", "line_number": 76, "usage_type": "call"}, {"api_name": "automation_test.conn_mysql", "line_number": 76, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 79, "usage_type": "call"}, {"api_name": "automation_test.conn_mysql.Operation_mysql", "line_number": 92, "usage_type": "call"}, {"api_name": "automation_test.conn_mysql", "line_number": 92, "usage_type": "name"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}, {"api_name": "jsonpath.jsonpath", "line_number": 149, "usage_type": "call"}, {"api_name": "jsonpath.jsonpath", "line_number": 150, "usage_type": "call"}, {"api_name": "jsonpath.jsonpath", "line_number": 151, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 157, "usage_type": "call"}, {"api_name": "jsonpath.jsonpath", "line_number": 303, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 386, "usage_type": "call"}]} +{"seq_id": "619338097", "text": "import pickle\nimport warnings\nfrom collections import ChainMap\nfrom collections.abc import Sequence\n\nfrom .mult_request import (MultipleServerRequestHandler,\n MultipleServerRequestHandlerPreserveIDs)\nfrom .server_response import ServerResponse\n\nBITRIX_PAGE_SIZE = 50\n\nclass UserRequestAbstract():\n\n def __init__(self, srh, method: str, params: dict):\n self.srh = srh\n self.method = self.standardized_method(method)\n self.params = self.standardized_params(params) if params else None\n self.check_special_limitations()\n \n\n def standardized_method(self, method):\n if not method:\n raise TypeError('Method cannot be empty')\n \n if not isinstance(method, str):\n raise TypeError('Method should be a str')\n\n method = method.lower().strip()\n\n if method.lower().strip() == 'batch':\n raise ValueError(\"Method cannot be 'batch'. 
Use call_batch() instead.\")\n \n return method\n \n \n def standardized_params(self, p):\n if not isinstance(p, dict):\n raise TypeError('Params argument should be a dict')\n\n for key, __ in p.items():\n if not isinstance(key, str):\n raise TypeError('Keys in params argument should be strs')\n\n p = {key.lower().strip(): value for key, value in p.items()}\n\n self.check_expected_clause_types(p)\n\n return p\n\n\n def check_expected_clause_types(self, p):\n EXPECTED_TYPES = {\n 'select': list,\n 'halt': int,\n 'cmd': dict,\n 'limit': int,\n 'order': dict,\n 'filter': dict,\n 'start': int,\n 'fields': dict\n }\n\n # check for allowed types of key values;\n # tuples and sets are acceptable wherever a list is expected\n for clause_key, clause_value in p.items():\n if clause_key in EXPECTED_TYPES:\n expected_type = EXPECTED_TYPES[clause_key]\n\n if expected_type == list:\n type_ok = isinstance(clause_value, (list, tuple, set))\n else:\n type_ok = isinstance(clause_value, expected_type)\n\n if not type_ok:\n raise TypeError(f'Clause \"{clause_key}\" should be of type {expected_type}, '\n f'but its type is {type(clause_value)}')\n\n\n def check_special_limitations(self):\n raise NotImplementedError\n \n \nclass GetAllUserRequest(UserRequestAbstract):\n def check_special_limitations(self):\n if self.params and not set(self.params.keys()).isdisjoint(\n {'start', 'limit', 'order'}\n ):\n raise ValueError(\"get_all() doesn't support parameters 'start', 'limit' or 'order'\")\n\n \n async def run(self):\n self.add_order_parameter()\n\n await self.make_first_request()\n\n if self.first_response.more_results_expected():\n await self.make_remaining_requests()\n self.dedup_results()\n \n return self.results\n\n\n def add_order_parameter(self):\n # a sort order has to be set explicitly, otherwise the server returns\n # entities in random order and they may repeat across pages\n \n order_clause = {'order': {'ID': 'ASC'}}\n \n if self.params:\n if 'order' not in self.params:\n self.params.update(order_clause)\n else:\n self.params = order_clause\n\n \n async def make_first_request(self):\n self.srh.add_request_task(self.method, self.params)\n self.first_response = await next(self.srh.get_server_serponses())\n self.results, self.total = self.first_response.result, self.first_response.total \n\n\n async def make_remaining_requests(self):\n item_list = (\n ChainMap({'start': start}, self.params)\n for start in range(len(self.results), self.total, BITRIX_PAGE_SIZE)\n )\n remaining_results = await MultipleServerRequestHandler(\n self.srh,\n method = self.method,\n item_list = item_list,\n real_len = self.total, \n real_start = len(self.results)\n ).run()\n \n self.results.extend(remaining_results)\n\n\n def dedup_results(self):\n # deduplicate by serializing the items, collecting them into a set\n # and deserializing them back\n self.results = (\n [pickle.loads(y) for y in {pickle.dumps(x) for x in self.results}]\n if self.results\n else []\n )\n\n\n if len(self.results) != self.total:\n warnings.warn(f\"Number of results returned ({len(self.results)}) \"\n f\"doesn't equal 'total' from the server reply ({self.total})\",\n RuntimeWarning)\n\n\nclass GetByIDUserRequest(UserRequestAbstract):\n def __init__(self, srh, method: str, params: dict, ID_list, ID_field_name):\n self.ID_list = ID_list\n self.ID_field_name = ID_field_name.upper().strip()\n super().__init__(srh, method, params)\n \n \n def check_special_limitations(self):\n if self.params and 'id' in self.params.keys():\n raise ValueError(\"get_by_ID() doesn't support parameter 'ID' 
within the 'params' argument\")\n\n if not isinstance(self.ID_list, Sequence):\n raise TypeError(\"get_by_ID(): 'ID_list' should be a sequence\")\n\n\n async def run(self):\n if self.list_empty():\n return []\n \n self.prepare_item_list()\n \n results = await MultipleServerRequestHandlerPreserveIDs(\n self.srh,\n self.method,\n self.item_list,\n ID_field=self.ID_field_name\n ).run()\n \n return results\n\n \n def list_empty(self):\n return len(self.ID_list) == 0\n \n \n def prepare_item_list(self):\n if self.params:\n self.item_list = [\n ChainMap({self.ID_field_name: ID}, self.params) \n for ID in self.ID_list\n ]\n else:\n self.item_list = [\n {self.ID_field_name: ID} \n for ID in self.ID_list\n ] \n\n\nclass CallUserRequest(GetByIDUserRequest):\n def __init__(self, srh, method: str, item_list):\n self.item_list = [self.standardized_params(item) for item in item_list]\n super().__init__(srh, method, None, None, '__order')\n\n \n def check_special_limitations(self):\n if not isinstance(self.item_list, Sequence):\n raise TypeError(\"call(): 'item_list' should be a sequence\")\n\n async def run(self):\n results = await super().run()\n \n # drop the auxiliary ordering field from the results\n return [item[1] for item in results]\n\n\n def list_empty(self):\n return len(self.item_list) == 0\n\n \n def prepare_item_list(self):\n # attach an ordering number to each item\n self.item_list = [\n ChainMap(item, {self.ID_field_name: 'order' + str(i)}) \n for i, item in enumerate(self.item_list)\n ]\n\n\nclass BatchUserRequest(UserRequestAbstract):\n\n def __init__(self, srh, params):\n super().__init__(srh, 'batch', params)\n\n\n def standardized_method(self, method):\n return 'batch'\n\n\n def check_special_limitations(self):\n if not self.params:\n raise ValueError(\"Params for a batch call can't be empty\")\n \n if {'halt', 'cmd'} != self.params.keys():\n raise ValueError(\"Params for a batch call should contain only 'halt' and 'cmd' clauses at the highest level\")\n\n if not isinstance(self.params['cmd'], dict):\n raise ValueError(\"'cmd' clause should contain a dict\")\n \n\n async def run(self):\n self.srh.add_request_task(self.method, self.params)\n response = await next(self.srh.get_server_serponses())\n return ServerResponse(response.result).result\n", "sub_path": "fast_bitrix24/user_request.py", "file_name": "user_request.py", "file_ext": "py", "file_size_in_byte": 8255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.ChainMap", "line_number": 126, "usage_type": "call"}, {"api_name": "mult_request.MultipleServerRequestHandler", "line_number": 129, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 143, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 143, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 150, "usage_type": "call"}, {"api_name": "collections.abc.Sequence", "line_number": 166, "usage_type": "argument"}, {"api_name": "mult_request.MultipleServerRequestHandlerPreserveIDs", "line_number": 176, "usage_type": "call"}, {"api_name": "collections.ChainMap", "line_number": 193, "usage_type": "call"}, {"api_name": "collections.abc.Sequence", "line_number": 210, "usage_type": "argument"}, {"api_name": "collections.ChainMap", "line_number": 227, "usage_type": "call"}, {"api_name": "server_response.ServerResponse", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "17344002", "text": "import socket\r\nimport json\r\n\r\nfrom flask import Flask, render_template, 
send_file\nfrom app import app, socketio\nfrom ironcar import *\n\nwith open(CONFIG) as json_file:\n\tconfig = json.load(json_file)\n\tMODELS_PATH = config['models_path']\n\n# ------- WEB PAGES --------\n@app.route('/')\ndef main():\n\tmodels = []\n\tif os.path.isdir(MODELS_PATH):\n\t\tmodels = [os.path.join(MODELS_PATH, f) for f in os.listdir(MODELS_PATH) if f.endswith('.hdf5')]\n\tprint('SERVER : models : ', models)\n\treturn render_template('index.html', models=models)\n\n\n@app.route('/commands')\ndef commands():\n\tcommands = ironcar.commands\n\tprint('SERVER : commands : ', commands)\n\treturn render_template('commands.html', commands=commands)\n\n\n@app.route('/picture')\ndef picture():\n\t\"\"\"\n\tGenerate a picture, save it, and send it to client\n\t\"\"\"\n\tpath_picture = ironcar.picture()\n\tprint('path_picture : ', path_picture)\n\n\tif path_picture:\n\t\tr = send_file(path_picture,\n\t\t\t\t\t\tas_attachment=True)\n\t\tr.headers[\"Pragma\"] = \"no-cache\"\n\t\tr.headers[\"Expires\"] = \"0\"\n\t\tr.headers['Cache-Control'] = 'public, max-age=0'\n\t\treturn r\n\treturn None\n\n\n# ------- SOCKETS ----------\n@socketio.on('mode_update')\ndef mode_update(mode):\n\t\"\"\"\n\tChange the driving mode of the car\n\t\"\"\"\n\tprint('SERVER : mode: ' + mode)\n\tironcar.switch_mode(mode)\n\n\n@socketio.on('model_update')\ndef model_update(model):\n\t\"\"\"\n\tChange the machine learning model used by the car\n\t\"\"\"\n\tsocketio.emit('msg2user', {'type': 'info', 'msg': 'Loading model {}...'.format(model)}, namespace='/car')\n\tprint('SERVER : model update: ' + model)\n\tironcar.select_model(model)\n\n\n@socketio.on('starter')\ndef handle_starter():\n\t\"\"\"\n\tStart / Stop the car\n\t\"\"\"\n\tprint('SERVER : starter switch')\n\tstate = ironcar.on_start()\n\tsocketio.emit('starter_switch', {'activated': state}, namespace='/car') # switch it\n\n\n@socketio.on('max_speed_update')\ndef update_max_speed(speed):\n\t\"\"\"\n\tLet the user defines a max speed for the car\n\t\"\"\"\n\tnew_speed = ironcar.max_speed_update(speed)\n\tprint(speed)\n\tprint('SERVER : max speed update received: ' + str(speed))\n\tsocketio.emit('max_speed_update_callback', {'speed': new_speed}, namespace='/car') # switch it\n\n\n@socketio.on('gas')\ndef handle_gas(gas):\n\t\"\"\"\n\tSend a gas order for manual mode\n\t\"\"\"\n\tprint('SERVER : gas order: ' + str(gas))\n\tironcar.on_gas(gas)\n\n\n@socketio.on('dir')\ndef handle_dir(direction):\n\t\"\"\"\n\tSend a dir order for manual mode\n\t\"\"\"\n\tprint('SERVER : dir : ' + str(direction))\n\tironcar.on_dir(direction)\n\n\n@socketio.on('streaming_starter')\ndef handle_streaming():\n\t\"\"\"\n\tTo start / stop the streaming mode\n\t\"\"\"\n\tprint('SERVER : streaming switch')\n\tironcar.switch_streaming()\n\tsocketio.emit('stream_switch', {'activated': ironcar.streaming_state}, namespace='/car') # switch it\n\n\n@socketio.on('command_update')\ndef handle_config(data):\n\t\"\"\"\n\tTo start / stop the streaming mode\n\t\"\"\"\n\tprint('SERVER : command update')\n\n\tcommand = data['command']\n\tvalue = data['value']\n\n\t# Check for wrong inputs\n\ttry:\n\t\tvalue = int(value)\n\texcept Exception as e:\n\t\tprint('`{}` cannot be cast to int'.format(value))\n\t\treturn\n\n\t# Modify the config file\n\twith open(CONFIG) as json_file:\n\t\tconfig = json.load(json_file)\n\n\tif command not in config['commands']:\n\t\tprint('The command `{}` is not available in config'.format(command))\n\t\treturn\n\n\tif command == 'invert_dir':\n\t\tconfig['commands'][command] = 
int(value) * config['commands'][command]\n\telse:\n\t\tconfig['commands'][command] = int(value)\n\n\twith open(CONFIG, 'w') as fp:\n\t\tfp.write(json.dumps(config, indent=4))\n\n\t# Load the modified config file in ironcar\n\tironcar.load_config()\n\n\n@socketio.on('verbose')\ndef handle_verbose(verbose):\n\t\"\"\"\n\tHandle verbose of ironcar\n\t\"\"\"\n\tprint('SERVER : verbose switch')\n\tironcar.switch_verbose(verbose)\n\n\nif __name__ == '__main__':\n\n\tIP = (([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")] or [[(s.connect((\"8.8.8.8\", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + [\"no IP found\"])[0]\n\tPORT = 5000\n\n\tprint('#' * 50)\n\tprint('# IRONCAR SERVER')\n\tprint('# Go to the url: {}:{}'.format(IP, PORT))\n\tprint('#' * 50)\n\n\tironcar = Ironcar()\n\tsocketio.run(app, host='0.0.0.0')\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 13, "usage_type": "call"}, {"api_name": "app.app", "line_number": 13, "usage_type": "name"}, {"api_name": "ironcar.commands", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 22, "usage_type": "call"}, {"api_name": "app.app", "line_number": 22, "usage_type": "name"}, {"api_name": "ironcar.picture", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 38, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 29, "usage_type": "call"}, {"api_name": "app.app", "line_number": 29, "usage_type": "name"}, {"api_name": "ironcar.switch_mode", "line_number": 54, "usage_type": "call"}, {"api_name": "app.socketio.on", "line_number": 48, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 48, "usage_type": "name"}, {"api_name": "app.socketio.emit", "line_number": 62, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 62, "usage_type": "name"}, {"api_name": "ironcar.select_model", "line_number": 64, "usage_type": "call"}, {"api_name": "app.socketio.on", "line_number": 57, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 57, "usage_type": "name"}, {"api_name": "ironcar.on_start", "line_number": 73, "usage_type": "call"}, {"api_name": "app.socketio.emit", "line_number": 74, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 74, "usage_type": "name"}, {"api_name": "app.socketio.on", "line_number": 67, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 67, "usage_type": "name"}, {"api_name": "ironcar.max_speed_update", "line_number": 82, "usage_type": "call"}, {"api_name": "app.socketio.emit", "line_number": 85, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 85, "usage_type": "name"}, {"api_name": "app.socketio.on", "line_number": 77, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 77, "usage_type": "name"}, {"api_name": "ironcar.on_gas", "line_number": 94, "usage_type": "call"}, {"api_name": "app.socketio.on", "line_number": 88, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 88, "usage_type": "name"}, 
{"api_name": "ironcar.on_dir", "line_number": 103, "usage_type": "call"}, {"api_name": "app.socketio.on", "line_number": 97, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 97, "usage_type": "name"}, {"api_name": "ironcar.switch_streaming", "line_number": 112, "usage_type": "call"}, {"api_name": "app.socketio.emit", "line_number": 113, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 113, "usage_type": "name"}, {"api_name": "ironcar.streaming_state", "line_number": 113, "usage_type": "attribute"}, {"api_name": "app.socketio.on", "line_number": 106, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 106, "usage_type": "name"}, {"api_name": "json.load", "line_number": 135, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 147, "usage_type": "call"}, {"api_name": "ironcar.load_config", "line_number": 150, "usage_type": "call"}, {"api_name": "app.socketio.on", "line_number": 116, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 116, "usage_type": "name"}, {"api_name": "ironcar.switch_verbose", "line_number": 159, "usage_type": "call"}, {"api_name": "app.socketio.on", "line_number": 153, "usage_type": "call"}, {"api_name": "app.socketio", "line_number": 153, "usage_type": "name"}, {"api_name": "socket.gethostbyname_ex", "line_number": 164, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 164, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 164, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 164, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 164, "usage_type": "attribute"}, {"api_name": "app.socketio.run", "line_number": 173, "usage_type": "call"}, {"api_name": "app.app", "line_number": 173, "usage_type": "argument"}, {"api_name": "app.socketio", "line_number": 173, "usage_type": "name"}]} +{"seq_id": "60575657", "text": "import csv, re\nfrom collections import Counter\n\n# bigram counter for Buckeye corpus; ignores fillers like etc. 
all of which are in angled brackets\n\n# this is the csv file of bigrams (actually list of trigrams to be parsed as bigrams) to be counted\n\ndata = 'iimpo.csv'\n\n# this reads the csv file into memory; each line now is stored as a list [word1, word2, word3]\n\ncsvDictionary = {}\n\ncnt = 0\nwith open(data, 'r') as f:\n\treader = csv.reader(f)\n\tnext(reader)\n\tfor line in reader:\n\t\tcsvDictionary[cnt]=line\n\t\tcnt+=1\n\n\nwith open('buckeyetext.txt', 'r') as g:\n\ttext = \"\"\n\twhile True:\n\t\tline = g.readline()\n\t\tif not line: break\n\t\ttext += line\n\n\nmyRegex = '\\s*(<[a-zA-Z]*>\\s*)*'\n\nc = Counter()\n\nfor v in csvDictionary.values():\n\n\tfor i in range(2):\n\n\t\t# this still double counts strings containing an apostrophe\n\t\tregex = '('+v[i]+myRegex+'\\\\b'+v[i+1]+'\\\\b)'\n\n\t\tkey = v[i]+' '+v[i+1]\n\n\t\tif key not in c:\n\t\t\n\t\t\tif re.findall(regex, text):\n\n\t\t\t\tfound = re.findall(regex, text)\n\n\t\t\t\t# use a separate index so the outer bigram index i is not shadowed,\n\t\t\t\t# and walk through every match instead of re-reading the first one\n\t\t\t\tfor j in range(len(found)):\n\n\t\t\t\t\ttup = found[j][0]\n\t\t\t\t\tfiller = found[j][1]\n\n\t\t\t\t\t# this is just for visualization purposes; notice the key is counted in the end\n\t\t\t\t\tstring2count = tup.replace(filler, '')\n\t\t\t\t\tprint(tup, '--', filler, '--', string2count)\n\t\t\t\t\tprint(key)\n\t\t\t\t\t\n\t\t\t\t\tc[key]+=1\n\t\t\t\n# for k, v in c.items():\n# \tprint(k, v)\n\nwith open('bigramfreq.csv', 'w') as csvfile:\n\tfieldnames = ('trigram', 'freqBigram1', 'freqBigram2')\n\twriter=csv.writer(csvfile)\n\twriter.writerow(fieldnames)\n\n\tfor v in csvDictionary.values():\n\n\t\tkey1 = v[0]+' '+v[1]\n\t\tkey2 = v[1]+' '+v[2]\n\n\t\tfreqBigram1 = c[key1]\n\t\tfreqBigram2 = c[key2]\n\n\t\twriter.writerow((v, freqBigram1, freqBigram2))\n\n\n\n\n\n\n\n\n\n\n\t\t\n", "sub_path": "countBuckeyeBigrams.py", "file_name": "countBuckeyeBigrams.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 33, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 48, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "361544184", "text": "# coding=utf-8\n\nimport codecs\n\nimport gensim\nimport jieba\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\n\nplt.rcParams['font.sans-serif'] = ['SimHei'] # make matplotlib render Chinese labels correctly\n# x = range(10)\n# plt.plot(x)\n# plt.title(\"中文\")\n# plt.show()\nwith codecs.open('names.txt', encoding=\"utf8\") as f:\n # strip the trailing newline from every line\n data = [line.strip() for line in f]\n\nnovels = data[::2]\nnames = data[1::2]\n\nnovel_names = {k: v.split() for k, v in zip(novels, names)}\n\n# for name in novel_names['天龙八部'][:20]:\n# print(name)\n\n# def find_main_charecters(novel, num=10):\n# with codecs.open('novels/{}.txt'.format(novel), encoding=\"utf8\") as f:\n# data = f.read()\n# chars = novel_names[novel]\n# count = list(map(lambda x: data.count(x), chars))\n# idx = np.argsort(count)\n#\n# plt.barh(range(num), count[idx[-num:][0]], color='red', align='center')\n# plt.title(novel, fontsize=14)\n# plt.yticks(range(num), chars[idx[-num:][0]], fontsize=14)\n# plt.show()\n#\n#\n# find_main_charecters(\"天龙八部\")\n# find_main_charecters(\"射雕英雄传\")\n# find_main_charecters(\"神雕侠侣\")\n# find_main_charecters(\"倚天屠龙记\")\n\nfor _, names in 
novel_names.items():\n for name in names:\n jieba.add_word(name)\n\nwith codecs.open(\"kungfu.txt\", encoding=\"utf8\") as f:\n kungfu_names = [line.strip() for line in f]\nwith codecs.open(\"bangs.txt\", encoding=\"utf8\") as f:\n bang_names = [line.strip() for line in f]\n\nfor name in kungfu_names:\n jieba.add_word(name)\n\nfor name in bang_names:\n jieba.add_word(name)\nnovels = [\"书剑恩仇录\",\n \"天龙八部\",\n \"碧血剑\",\n \"越女剑\",\n \"飞狐外传\",\n \"侠客行\",\n \"射雕英雄传\",\n \"神雕侠侣\",\n \"连城诀\",\n \"鸳鸯刀\",\n \"倚天屠龙记\",\n \"白马啸西风\",\n \"笑傲江湖\",\n \"雪山飞狐\",\n \"鹿鼎记\"]\n\n# novels = [\"天龙八部\"]\n\nsentences = []\n\nfor novel in novels:\n print(\"处理:{}\".format(novel))\n with codecs.open('novels/{}.txt'.format(novel), encoding=\"utf8\") as f:\n sentences += [list(jieba.cut(line.strip())) for line in f]\n\nmodel = gensim.models.Word2Vec(sentences,\n size=100,\n window=5,\n min_count=5,\n workers=4)\n# for k, s in model.most_similar(positive=[\"乔峰\", \"萧峰\"]):\n# print(k, s)\n#\n# for k, s in model.most_similar(positive=[\"阿朱\"]):\n# print(k, s)\n#\n# for k, s in model.most_similar(positive=[\"丐帮\"]):\n# print(k, s)\n#\n# for k, s in model.most_similar(positive=[\"降龙十八掌\"]):\n# print(k, s)\n\n\ndef find_relationship(a, b, c):\n \"\"\"\n 返回 d\n a与b的关系,跟c与d的关系一样\n \"\"\"\n d, _ = model.most_similar(positive=[c, b], negative=[a])[0]\n print(\"给定“{}”与“{}”,“{}”和“{}”有类似的关系\".format(a, b, c, d))\n\n\nfind_relationship(\"段誉\", \"段公子\", \"乔峰\")\n\n# 情侣对\nfind_relationship(\"郭靖\", \"黄蓉\", \"杨过\")\n# 岳父女婿\nfind_relationship(\"令狐冲\", \"任我行\", \"郭靖\")\n# 非情侣\nfind_relationship(\"郭靖\", \"华筝\", \"杨过\")\n\n# # 韦小宝\n# find_relationship(\"杨过\", \"小龙女\", \"韦小宝\")\n# find_relationship(\"令狐冲\", \"盈盈\", \"韦小宝\")\n# find_relationship(\"张无忌\", \"赵敏\", \"韦小宝\")\n\nfind_relationship(\"郭靖\", \"降龙十八掌\", \"黄蓉\")\nfind_relationship(\"武当\", \"张三丰\", \"少林\")\nfind_relationship(\"任我行\", \"魔教\", \"令狐冲\")\n\n# all_names = np.array(list(filter(lambda c: c in model, novel_names[\"天龙八部\"])))\n# word_vectors = np.array(list(map(lambda c: model[c], all_names)))\n#\n# from sklearn.cluster import KMeans\n#\n# N = 3\n#\n# label = KMeans(N).fit(word_vectors).labels_\n#\n# for c in range(N):\n# print(\"\\n类别{}:\".format(c + 1))\n# for idx, name in enumerate(all_names[label == c]):\n# print(name, )\n# if idx % 10 == 9:\n# print()\n# print()\n#\n# N = 4\n#\n# c = sp.stats.mode(label).mode\n#\n# remain_names = all_names[label != c]\n# remain_vectors = word_vectors[label != c]\n# remain_label = KMeans(N).fit(remain_vectors).labels_\n#\n# for c in range(N):\n# print(\"\\n类别{}:\".format(c + 1))\n# for idx, name in enumerate(remain_names[remain_label == c]):\n# print(name, )\n# if idx % 10 == 9:\n# print()\n# print()\n#\n# import scipy.cluster.hierarchy as sch\n#\n# Y = sch.linkage(word_vectors, method=\"ward\")\n#\n# _, ax = plt.subplots(figsize=(10, 40))\n#\n# Z = sch.dendrogram(Y, orientation='right')\n# idx = Z['leaves']\n#\n# ax.set_xticks([])\n# ax.set_yticklabels(all_names[idx])\n# ax.set_frame_on(False)\n#\n# plt.show()\n# all_names = np.array(list(filter(lambda c: c in model, kungfu_names)))\n# word_vectors = np.array(list(map(lambda c: model[c], all_names)))\n#\n# Y = sch.linkage(word_vectors, method=\"ward\")\n#\n# _, ax = plt.subplots(figsize=(10, 35))\n#\n# Z = sch.dendrogram(Y, orientation='right')\n#\n# idx = Z['leaves']\n#\n# ax.set_xticks([])\n#\n# ax.set_yticklabels(all_names[idx])\n#\n# ax.set_frame_on(False)\n#\n# plt.show()\n#\n# all_names = np.array(list(filter(lambda c: c in model, bang_names)))\n# word_vectors = 
np.array(list(map(lambda c: model[c], all_names)))\n#\n# all_names = np.array(all_names)\n#\n# Y = sch.linkage(word_vectors, method=\"ward\")\n#\n# _, ax = plt.subplots(figsize=(10, 25))\n#\n# Z = sch.dendrogram(Y, orientation='right')\n#\n# idx = Z['leaves']\n#\n# ax.set_xticks([])\n#\n# ax.set_yticklabels(all_names[idx])\n#\n# ax.set_frame_on(False)\n#\n# plt.show()\n", "sub_path": "NLP-word2vec.py", "file_name": "NLP-word2vec.py", "file_ext": "py", "file_size_in_byte": 5673, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 12, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "codecs.open", "line_number": 17, "usage_type": "call"}, {"api_name": "jieba.add_word", "line_number": 49, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 51, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 53, "usage_type": "call"}, {"api_name": "jieba.add_word", "line_number": 57, "usage_type": "call"}, {"api_name": "jieba.add_word", "line_number": 60, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 83, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 84, "usage_type": "call"}, {"api_name": "gensim.models.Word2Vec", "line_number": 86, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 86, "usage_type": "attribute"}]} +{"seq_id": "74588847", "text": "import torch\nfrom torch.autograd import Variable\nfrom tensorboardX import SummaryWriter\n\nfrom models.model import UNet\nfrom dataloader.loader import Mydata\nfrom options.train_option import opt\n\ndef demo():\n Model = UNet(num_classes=3).cuda()\n DataLoader = Mydata.getLoader()\n Optimizer = torch.optim.Adam(Model.parameters())\n loss_fn = torch.nn.MSELoss()\n writer = SummaryWriter(opt.log_dir)\n for epoch in range(opt.epoch_num):\n for (i,data) in enumerate(DataLoader):\n before = Variable(data[0]).cuda().float()\n after = Variable(data[1]).cuda().float()\n stroke = Variable(data[2]).cuda().float()\n\n before = before.permute(0,3,1,2)\n after = after.permute(0,3,1,2)\n stroke = stroke.permute(0,2,1)\n output = Model(before,stroke)\n\n loss = loss_fn(after,output)\n Optimizer.zero_grad()\n loss.backward()\n Optimizer.step()\n\n writer.add_scalar('loss',loss.item(),i)\n print(before.shape,output.shape,loss.item())\n\n\nif __name__ == '__main__':\n demo()", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "models.model.UNet", "line_number": 10, "usage_type": "call"}, {"api_name": "dataloader.loader.Mydata.getLoader", "line_number": 11, "usage_type": "call"}, {"api_name": "dataloader.loader.Mydata", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 14, "usage_type": "call"}, {"api_name": "options.train_option.opt.log_dir", "line_number": 14, "usage_type": "attribute"}, {"api_name": "options.train_option.opt", "line_number": 14, "usage_type": "name"}, {"api_name": 
"options.train_option.opt.epoch_num", "line_number": 15, "usage_type": "attribute"}, {"api_name": "options.train_option.opt", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "443129560", "text": "# -*- coding:utf-8 -*-\n\nimport unittest\n\nimport nagisa\n\n\nclass TestNagisa(unittest.TestCase):\n def test_tagging(self):\n # test_1\n text = 'Pythonで簡単に使えるツールです'\n output = 'Python/名詞 で/助詞 簡単/形状詞 に/助動詞 使える/動詞 ツール/名詞 です/助動詞'\n words = nagisa.tagging(text)\n self.assertEqual(output, str(words))\n\n # test_2\n output = 'python/名詞 で/助詞 簡単/形状詞 に/助動詞 使える/動詞 ツール/名詞 です/助動詞'\n words = nagisa.tagging(text, lower=True)\n self.assertEqual(output, str(words))\n\n\n # test_3\n text = 'ニューラルネットワークを使ってます。'\n output = 'ニューラル/名詞 ネットワーク/名詞 を/助詞 使っ/動詞 て/助動詞 ます/助動詞 。/補助記号'\n self.assertEqual(output, str(nagisa.tagging(text)))\n\n # test_4\n tagger_nn = nagisa.Tagger(single_word_list=['ニューラルネットワーク', \"ニューラルネット\"])\n output = 'ニューラルネットワーク/名詞 を/助詞 使っ/動詞 て/助動詞 ます/助動詞 。/補助記号'\n self.assertEqual(output, str(tagger_nn.tagging(text)))\n\n # test_5\n text = \"3月に見た「3月のライオン」\"\n new_tagger = nagisa.Tagger(single_word_list=['3月のライオン'])\n output = '3/名詞 月/名詞 に/助詞 見/動詞 た/助動詞 「/補助記号 3月のライオン/名詞 」/補助記号'\n self.assertEqual(output, str(new_tagger.tagging(text)))\n\n # test_6\n text = \"それが、iPhone XSです。\"\n output = \"それ/代名詞 が/助詞 、/補助記号 iPhone XS/名詞 です/助動詞 。/補助記号\"\n new_tagger = nagisa.Tagger(single_word_list=[\"iPhone[a-zA-Z0-9 ]+\"])\n\n self.assertEqual(output, str(new_tagger.tagging(text)))\n\n # test_7\n text = \"1234abc ABC\"\n output = \"1234/名詞 abc ABC/名詞\"\n new_tagger = nagisa.Tagger(single_word_list=[\"[a-zA-Z ]+\", \"[0-9]+\"])\n\n self.assertEqual(output, str(new_tagger.tagging(text)))\n\n # test_8\n text = '(人•ᴗ•♡)こんばんは♪'\n output = '(人•ᴗ•♡)/補助記号 こんばんは/感動詞 ♪/補助記号'\n words = nagisa.tagging(text)\n self.assertEqual(output, str(words))\n\n # test_9\n url = 'https://github.com/taishi-i/nagisaでコードを公開中(๑¯ω¯๑)'\n output = 'コード/名詞 公開/名詞 中/接尾辞'\n words = nagisa.filter(url, filter_postags=['URL', '補助記号', '助詞'])\n self.assertEqual(output, str(words))\n\n # test_10\n output = 'https://github.com/taishi-i/nagisa/URL で/助詞 を/助詞 (๑ ̄ω ̄๑)/補助記号'\n words = nagisa.extract(url, extract_postags=['URL', '補助記号', '助詞'])\n self.assertEqual(output, str(words))\n\n # test_11\n words = [\" (人•ᴗ•♡)\",\"こんばんは\",\"♪\"]\n output = ['補助記号', '感動詞', '補助記号']\n postags = nagisa.postagging(words)\n self.assertEqual(output, postags)\n\n # test_12\n postags = nagisa.decode(words)\n self.assertEqual(output, postags)\n\n\n def test_fit(self):\n # test_13\n nagisa.fit(\n train_file=\"nagisa/data/sample_datasets/sample.train\",\n dev_file=\"nagisa/data/sample_datasets/sample.dev\",\n test_file=\"nagisa/data/sample_datasets/sample.test\",\n model_name=\"sample\",\n )\n\n # test_14\n nagisa.fit(\n train_file=\"nagisa/data/sample_datasets/sample.train\",\n dev_file=\"nagisa/data/sample_datasets/sample.dev\",\n test_file=\"nagisa/data/sample_datasets/sample.test\",\n dict_file=\"nagisa/data/sample_datasets/sample.dict\",\n emb_file=\"nagisa/data/sample_datasets/sample.emb\",\n model_name=\"sample\",\n newline=\"EOS\",\n delimiter=\"\\t\"\n )\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTests(unittest.makeSuite(TestNagisa))\n return suite\n\n\nif __name__ == '__main__':\n unittest.main()\n", 
"sub_path": "test/nagisa_test.py", "file_name": "nagisa_test.py", "file_ext": "py", "file_size_in_byte": 4264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "nagisa.tagging", "line_number": 13, "usage_type": "call"}, {"api_name": "nagisa.tagging", "line_number": 18, "usage_type": "call"}, {"api_name": "nagisa.tagging", "line_number": 25, "usage_type": "call"}, {"api_name": "nagisa.Tagger", "line_number": 28, "usage_type": "call"}, {"api_name": "nagisa.Tagger", "line_number": 34, "usage_type": "call"}, {"api_name": "nagisa.Tagger", "line_number": 41, "usage_type": "call"}, {"api_name": "nagisa.Tagger", "line_number": 48, "usage_type": "call"}, {"api_name": "nagisa.tagging", "line_number": 55, "usage_type": "call"}, {"api_name": "nagisa.filter", "line_number": 61, "usage_type": "call"}, {"api_name": "nagisa.extract", "line_number": 66, "usage_type": "call"}, {"api_name": "nagisa.postagging", "line_number": 72, "usage_type": "call"}, {"api_name": "nagisa.decode", "line_number": 76, "usage_type": "call"}, {"api_name": "nagisa.fit", "line_number": 82, "usage_type": "call"}, {"api_name": "nagisa.fit", "line_number": 90, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 103, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 104, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "257349115", "text": "\"\"\"\ndefines routines useful for handling ULTRACAM log files\n\nClass\n=====\n\nUlog : represents all ULTRACAM data of a series of logfiles\n\nUlogError : exception class\n\nFunctions\n=========\n\nrulog : reads one star's ultracam data directly from an ASCII log file\n to return a Dset\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nimport astropy.io.fits as fits\nfrom trm import subs\n\nfrom .core import Axis, Dset, DnlError\n\ndef rulog(fname, nccd, aperture, type='c', emax=5, form='new'):\n \"\"\"\n Reads ultracam data directly into a Dset.\n\n NB it is more efficient for multiple Dsets from one log file\n to read the log file first into an Ulog and then to extract\n Dsets from this.\n\n fname : string\n log file name\n\n nccd : int\n ccd number, starting from 1.\n\n aperture : int\n aperture number, starting from 1.\n\n type : string\n 'c' for counts (no other type recognised at present)\n\n emax : int\n maximum error code\n\n form : string\n 'new' or 'old' format. 
New style has fewer columns\n    \"\"\"\n\n    if form == 'new':\n        off = 0\n    elif form == 'old':\n        off = 1\n    else:\n        raise UlogError(\"rulog: did not recognize value of 'form' \" +\n                        \"which must be either 'new' or 'old'\")\n\n    if type == 'c':\n        if aperture < 1:\n            raise UlogError('rulog: aperture < 1')\n        offset = 14*(aperture-1)\n        yc = offset + 15 + off\n        yec = offset + 16 + off\n        fc = offset + 21 + off\n        dset = _rulog(fname, nccd, 'Count rate', 'cts/sec', yc,\n                      yec, off, fc, emax)\n        dset.y.data /= (2.*86400.)*dset.x.errors\n        if dset.y.has_errors:\n            dset.y.errors /= (2.*86400.)*dset.x.errors\n        dset['NCCD'] = nccd\n        dset['Aperture'] = aperture\n    else:\n        raise UlogError('rulog: type = ' + type + ' unrecognised.')\n\n    dset['Type'] = 'time series'\n    dset['Time'] = 'MJD (UTC)'\n    return dset\n\ndef _rulog(fname, nccd, yaxis, yunits, yc, yec, off, fc, emax):\n    \"\"\"\n    Reads ultracam data in, returning a Dset.\n\n    fname : string\n        file name\n\n    nccd : int\n        ccd number\n\n    yaxis : string\n        name for y axis data\n\n    yunits : string\n        units for y axis data\n\n    yc : int\n        column for y data\n\n    yec : int\n        column for y errors. 0 to ignore.\n\n    off : int\n        offset, 0 or 1, for new or old log file formats\n\n    fc : int\n        column for error flag. 0 to ignore.\n\n    emax : int\n        maximum error code\n    \"\"\"\n\n    if yc < 1:\n        raise UlogError('_rulog: yc < 1')\n    if nccd < 1:\n        raise UlogError('_rulog: nccd < 1')\n\n    fin = open(fname)\n    xc = 1\n    xec = 3+off\n    yc -= 1\n    yec -= 1\n    fc -= 1\n    x = []\n    xe = []\n    y = []\n    ye = []\n    flag = []\n    cmax = max(4+off, yc, yec)\n    for line in fin:\n        if line[0:1] != '#' and not line.isspace():\n            svar = line.split()\n            if len(svar) <= cmax:\n                raise UlogError('_rulog: too few columns of data in ' +\n                                fname)\n\n            if int(svar[4+off]) == nccd:\n                x.append(svar[xc])\n                xe.append(svar[xec])\n                y.append(svar[yc])\n                if yec > -1: ye.append(svar[yec])\n                if fc > -1: flag.append(svar[fc])\n    fin.close()\n    if len(x) == 0:\n        raise UlogError('_rulog: no data loaded from ' + fname)\n\n    # set data types, convert to arrays\n    x = np.asarray(x, np.float64)\n    xe = np.asarray(xe, np.float32)/(2.*86400.)\n    y = np.asarray(y, np.float32)\n    ye = np.asarray(ye, np.float32) if yec > -1 else None\n    flag = np.asarray(flag, np.int) if fc > -1 else None\n\n    # identify good data\n    if yec > -1:\n        ygood = ye > 0.\n    else:\n        ygood = None\n\n    if fc > -1:\n        good = flag <= emax\n    else:\n        good = None\n\n    return Dset(Axis('MJD (UTC)', 'days', x, xe),\n                Axis(yaxis, yunits, y, ye, ygood), good=good)\n\nclass Ulog(object):\n\n    \"\"\"\n    Class for ULTRACAM data analysis. Contains all data of an ULTRACAM log\n    file or files.\n\n    Every attribute is a dictionary keyed by CCD number (e.g. 1, 2 or 3 for\n    ULTRACAM). As it is a dictionary, it is possible to have an entry for\n    CCD 2 but not 1 for instance.\n\n    The following attributes each return an array, with data type given at\n    the end of the description lines::\n\n    utc : dict\n        UTC times at mid exposure, in MJD [float64]\n\n    tflag : dict\n        reliability of times [bool]\n\n    expose : dict\n        exposure time, seconds [float32]\n\n    fwhm : dict\n        FWHM in pixels [float32]\n\n    beta : dict\n        Moffat beta parameter [float32]\n\n    The next attributes each return a list of arrays, with one entry per\n    aperture used for the CCD in question. 
Thus x[2][1] gives the x positions\n    for the second aperture (usual C-like start at 0 convention for lists) of\n    CCD 3.\n\n    x : dict\n        x positions used [float32]\n\n    y : dict\n        y positions used [float32]\n\n    xm : dict\n        x positions measured (0 for invalid or linked apertures) [float32]\n\n    ym : dict\n        y positions measured (0 for invalid or linked apertures) [float32]\n\n    exm : dict\n        1-sigma uncertainties in measured x positions (-1 if invalid /\n        linked) [float32]\n\n    eym : dict\n        1-sigma uncertainties in measured y positions (-1 if invalid /\n        linked) [float32]\n\n    counts : dict\n        counts in the aperture [float32]\n\n    sigma : dict\n        1-sigma uncertainties on counts in the aperture [float32]\n\n    sky : dict\n        sky background, counts per pixel [float32]\n\n    nsky : dict\n        number of sky pixels [int]\n\n    nrej : dict\n        number of pixels rejected from the sky [int]\n\n    worst : dict\n        worst bad pixel in aperture (exact meaning of this is down to the\n        user) [int]\n\n    eflag : dict\n        error flag (see ultracam reduce file for meanings) [int]\n\n    Construct using::\n\n       ult = Ulog(flist)\n\n    where flist is a list of log files. These can be either ASCII '.log'\n    files or FITS, but must be all of a type.\n    \"\"\"\n\n    def __init__(self, files, form='new'):\n        \"\"\"\n        Constructs an Ulog from a file or files\n\n        files : string / list\n            file name or list of file names which will be read in order\n            and should match in format, aperture numbers etc.\n        \"\"\"\n\n        if form == 'new':\n            off = 0\n        else:\n            off = 1\n\n        nline = 0\n        nccd = set()\n        naper = {}\n        self.utc = {}\n        self.tflag = {}\n        self.expose = {}\n        self.fwhm = {}\n        self.beta = {}\n        self.x = {}\n        self.y = {}\n        self.xm = {}\n        self.ym = {}\n        self.exm = {}\n        self.eym = {}\n        self.counts = {}\n        self.sigma = {}\n        self.sky = {}\n        self.nsky = {}\n        self.nrej = {}\n        self.worst = {}\n        self.eflag = {}\n        found_all_ccds = False\n\n        if isinstance(files, str):\n            files = [files]\n\n        if files[0].endswith('.log'):\n            ftype = 'log'\n        elif files[0].endswith('.fits') or files[0].endswith('.fit') or \\\n             files[0].endswith('.fits.gz') or files[0].endswith('.fit.gz'):\n            ftype = 'fits'\n        else:\n            raise UlogError('Ulog(files): did not recognize file type of' +\n                            ' first one = ' + files[0])\n\n        for fname in files[1:]:\n            if fname.endswith('.log') and ftype == 'fits':\n                raise UlogError('Ulog(files): clashing file type. Expected' +\n                                ' a FITS file but got = ' + fname)\n            elif (fname.endswith('.fits') or fname.endswith('.fit') or\n                  fname.endswith('.fits.gz') or\n                  fname.endswith('.fit.gz')) and ftype == 'log' :\n                raise UlogError('Ulog(files): clashing file type. Expected' +\n                                ' an ASCII log file but got = ' + fname)\n\n        if ftype == 'log':\n\n            for fname in files:\n\n                fin = open(fname)\n                for line in fin:\n                    nline += 1\n                    if line[0:1] != '#' and not line.isspace():\n                        svar = line.split()\n\n                        # we accumulate aperture numbers for each new CCD\n                        # encountered, but if we re-find a CCD, we check that\n                        # aperture numbers match. 
Also check that extra CCDs\n # are not found after all were thought to have been\n # found\n if (len(svar) - 7 - off ) % 14 > 0:\n raise UlogError('Ulog.__init__: incorrect number' +\n ' of entries in line ' +\n str(nline) + ' of ' + fname)\n nc = int(svar[4+off])\n nap = (len(svar) - 7 - off ) // 14\n if nc in nccd:\n if nap != naper[nc]:\n raise UlogError('Ulog.__init__: incorrect' +\n ' number of apertures in' +\n ' line ' + str(nline) +\n ' of ' + fname)\n found_all_ccds = True\n elif found_all_ccds:\n raise UlogError('Ulog.__init__: new CCD was ' +\n 'found even though all were ' +\n 'thought to be found in line ' +\n str(nline) + ' of ' + fname)\n else:\n nccd.add(nc)\n naper[nc] = nap\n\n # initialise the lists for this CCD\n self.utc[nc] = []\n self.tflag[nc] = []\n self.expose[nc] = []\n self.fwhm[nc] = []\n self.beta[nc] = []\n self.x[nc] = [[] for i in range(nap)]\n self.y[nc] = [[] for i in range(nap)]\n self.xm[nc] = [[] for i in range(nap)]\n self.ym[nc] = [[] for i in range(nap)]\n self.exm[nc] = [[] for i in range(nap)]\n self.eym[nc] = [[] for i in range(nap)]\n self.counts[nc] = [[] for i in range(nap)]\n self.sigma[nc] = [[] for i in range(nap)]\n self.sky[nc] = [[] for i in range(nap)]\n self.nsky[nc] = [[] for i in range(nap)]\n self.nrej[nc] = [[] for i in range(nap)]\n self.worst[nc] = [[] for i in range(nap)]\n self.eflag[nc] = [[] for i in range(nap)]\n\n # squirrel the data away\n self.utc[nc].append(svar[1])\n self.tflag[nc].append(svar[2])\n self.expose[nc].append(svar[3+off])\n self.fwhm[nc].append(svar[5+off])\n self.beta[nc].append(svar[6+off])\n for i in range(nap):\n offset = 14*i + 6 + off\n self.x[nc][i].append(svar[offset+2])\n self.y[nc][i].append(svar[offset+3])\n self.xm[nc][i].append(svar[offset+4])\n self.ym[nc][i].append(svar[offset+5])\n self.exm[nc][i].append(svar[offset+6])\n self.eym[nc][i].append(svar[offset+7])\n self.counts[nc][i].append(svar[offset+8])\n self.sigma[nc][i].append(svar[offset+9])\n self.sky[nc][i].append(svar[offset+10])\n self.nsky[nc][i].append(svar[offset+11])\n self.nrej[nc][i].append(svar[offset+12])\n self.worst[nc][i].append(svar[offset+13])\n self.eflag[nc][i].append(svar[offset+14])\n\n fin.close()\n\n # Transform to numpy arrays of correct type\n for nc in nccd:\n\n self.utc[nc] = np.asarray(self.utc[nc], np.float64)\n self.tflag[nc] = np.asarray(self.tflag[nc], np.bool)\n self.expose[nc] = np.asarray(self.expose[nc], np.float32)\n self.fwhm[nc] = np.asarray(self.fwhm[nc], np.float32)\n self.beta[nc] = np.asarray(self.beta[nc], np.float32)\n\n for nap in range(naper[nc]):\n self.x[nc][nap] = np.asarray(self.x[nc][nap],\n np.float32)\n self.y[nc][nap] = np.asarray(self.y[nc][nap],\n np.float32)\n self.xm[nc][nap] = np.asarray(self.xm[nc][nap],\n np.float32)\n self.ym[nc][nap] = np.asarray(self.ym[nc][nap],\n np.float32)\n self.exm[nc][nap] = np.asarray(self.exm[nc][nap],\n np.float32)\n self.eym[nc][nap] = np.asarray(self.eym[nc][nap],\n np.float32)\n self.counts[nc][nap] = np.asarray(self.counts[nc][nap],\n np.float32)\n self.sigma[nc][nap] = np.asarray(self.sigma[nc][nap],\n np.float32)\n self.sky[nc][nap] = np.asarray(self.sky[nc][nap],\n np.float32)\n self.nsky[nc][nap] = np.asarray(self.nsky[nc][nap],\n np.int)\n self.nrej[nc][nap] = np.asarray(self.nrej[nc][nap],\n np.int)\n self.worst[nc][nap] = np.asarray(self.worst[nc][nap],\n np.int)\n self.eflag[nc][nap] = np.asarray(self.eflag[nc][nap],\n np.int)\n\n elif ftype == 'fits':\n\n for fname in files:\n hdulist = fits.open(fname)\n\n for n in 
range(1,len(hdulist)):\n\n                    thead = hdulist[n].header\n                    nc = thead['NCCD']\n                    nap = (thead['TFIELDS'] - 5 ) // 13\n\n                    if nc in nccd:\n\n                        # append to already created lists\n                        if nap != naper[nc]:\n                            raise UlogError('Ulog.__init__: incorrect' +\n                                            ' number of apertures in ' + fname)\n\n                        # to reach here, all CCDs must have been found\n                        found_all_ccds = True\n                        tdata = hdulist[n].data\n\n                        self.utc[nc] = np.concatenate((self.utc[nc],\n                                                       tdata.field('MJD')))\n                        self.tflag[nc] = np.concatenate((self.tflag[nc],\n                                                         tdata.field('Flag')))\n                        self.expose[nc] = np.concatenate((self.expose[nc],\n                                                          tdata.field('Expose')))\n                        self.fwhm[nc] = np.concatenate((self.fwhm[nc],\n                                                        tdata.field('FWHM')))\n                        self.beta[nc] = np.concatenate((self.beta[nc],\n                                                        tdata.field('beta')))\n                        for i in range(nap):\n                            lbl = '_' + str(i+1)\n                            self.x[nc][i] = np.concatenate(\n                                (self.x[nc][i],tdata.field('X' + lbl)))\n                            self.y[nc][i] = np.concatenate(\n                                (self.y[nc][i], tdata.field('Y' + lbl)))\n                            self.xm[nc][i] = np.concatenate(\n                                (self.xm[nc][i], tdata.field('XM' + lbl)))\n                            self.ym[nc][i] = np.concatenate(\n                                (self.ym[nc][i], tdata.field('YM' + lbl)))\n                            self.exm[nc][i] = np.concatenate(\n                                (self.exm[nc][i],tdata.field('EXM' + lbl)))\n                            self.eym[nc][i] = np.concatenate(\n                                (self.eym[nc][i],tdata.field('EYM' + lbl)))\n                            self.counts[nc][i] = np.concatenate(\n                                (self.counts[nc][i],\n                                 tdata.field('Counts' + lbl)))\n                            self.sigma[nc][i] = np.concatenate(\n                                (self.sigma[nc][i],tdata.field('Sigma' + lbl)))\n                            self.sky[nc][i] = np.concatenate(\n                                (self.sky[nc][i],tdata.field('Sky' + lbl)))\n                            self.nsky[nc][i] = np.concatenate(\n                                (self.nsky[nc][i],tdata.field('Nsky' + lbl)))\n                            self.nrej[nc][i] = np.concatenate(\n                                (self.nrej[nc][i],tdata.field('Nrej' + lbl)))\n                            self.worst[nc][i] = np.concatenate(\n                                (self.worst[nc][i],tdata.field('Worst' + lbl)))\n                            self.eflag[nc][i] = np.concatenate(\n                                (self.eflag[nc][i],tdata.field('Eflag' + lbl)))\n\n                    elif found_all_ccds:\n                        raise UlogError('Ulog.__init__: new CCD was found ' +\n                                        'even though all were thought to be ' +\n                                        'found in ' + fname)\n                    else:\n\n                        # initialise the lists\n                        nccd.add(nc)\n                        naper[nc] = nap\n                        tdata = hdulist[n].data\n\n                        self.utc[nc] = tdata.field('MJD')\n                        self.tflag[nc] = tdata.field('Flag')\n                        self.expose[nc] = tdata.field('Expose')\n                        self.fwhm[nc] = tdata.field('FWHM')\n                        self.beta[nc] = tdata.field('beta')\n                        self.x[nc] = [tdata.field('X_' + str(i+1))\n                                      for i in range(nap)]\n                        self.y[nc] = [tdata.field('Y_' + str(i+1))\n                                      for i in range(nap)]\n                        self.xm[nc] = [tdata.field('XM_' + str(i+1))\n                                       for i in range(nap)]\n                        self.ym[nc] = [tdata.field('YM_' + str(i+1))\n                                       for i in range(nap)]\n                        self.exm[nc] = [tdata.field('EXM_' + str(i+1))\n                                        for i in range(nap)]\n                        self.eym[nc] = [tdata.field('EYM_' + str(i+1))\n                                        for i in range(nap)]\n                        self.counts[nc] = [tdata.field('Counts_' + str(i+1))\n                                           for i in range(nap)]\n                        self.sigma[nc] = [tdata.field('Sigma_' + str(i+1))\n                                          for i in range(nap)]\n                        self.sky[nc] = [tdata.field('Sky_' + str(i+1))\n                                        for i in range(nap)]\n                        self.nsky[nc] = [tdata.field('Nsky_' + str(i+1))\n                                         for i in range(nap)]\n                        self.nrej[nc] = [tdata.field('Nrej_' + str(i+1))\n                                         for i in range(nap)]\n                        self.worst[nc] = [tdata.field('Worst_' + str(i+1))\n                                          for i in range(nap)]\n                        self.eflag[nc] = [tdata.field('Eflag_' + str(i+1))\n                                          for i in range(nap)]\n\n                hdulist.close()\n\n    def __iadd__(self, other):\n        \"\"\"\n        += in-place addition to add one Ulog onto the end of another\n\n        ccd numbers and aperture numbers for each CCD must match. 
This is useful\n        when reading in a sequence of log files.\n        \"\"\"\n\n        if not isinstance(other, Ulog):\n            raise UlogError('Ulog.__iadd__: can' +\n                            ' only add another Ulog to an Ulog.')\n\n        nccd = set(self.utc.keys())\n        if set(other.utc.keys()) != nccd:\n            raise UlogError('Ulog.__iadd__: CCD numbers of inputs do not match')\n\n        for nc in nccd:\n            if len(self.x[nc]) != len(other.x[nc]):\n                raise UlogError('Ulog.__iadd__: incompatible' +\n                                ' aperture numbers for CCD ' + str(nc))\n\n        # OK, tests passed, add new arrays onto the end of the old ones\n        for nc in nccd:\n\n            self.utc[nc] = np.concatenate((self.utc[nc], other.utc[nc]))\n            self.tflag[nc] = np.concatenate((self.tflag[nc], other.tflag[nc]))\n            self.expose[nc] = np.concatenate((self.expose[nc],other.expose[nc]))\n            self.fwhm[nc] = np.concatenate((self.fwhm[nc],other.fwhm[nc]))\n            self.beta[nc] = np.concatenate((self.beta[nc],other.beta[nc]))\n\n            for nap in range(len(self.x[nc])):\n                self.x[nc][nap] = np.concatenate(\n                    (self.x[nc][nap],other.x[nc][nap]))\n                self.y[nc][nap] = np.concatenate(\n                    (self.y[nc][nap],other.y[nc][nap]))\n                self.xm[nc][nap] = np.concatenate(\n                    (self.xm[nc][nap],other.xm[nc][nap]))\n                self.ym[nc][nap] = np.concatenate(\n                    (self.ym[nc][nap],other.ym[nc][nap]))\n                self.exm[nc][nap] = np.concatenate(\n                    (self.exm[nc][nap],other.exm[nc][nap]))\n                self.eym[nc][nap] = np.concatenate(\n                    (self.eym[nc][nap],other.eym[nc][nap]))\n                self.counts[nc][nap] = np.concatenate(\n                    (self.counts[nc][nap],other.counts[nc][nap]))\n                self.sigma[nc][nap] = np.concatenate(\n                    (self.sigma[nc][nap],other.sigma[nc][nap]))\n                self.sky[nc][nap] = np.concatenate(\n                    (self.sky[nc][nap],other.sky[nc][nap]))\n                self.nsky[nc][nap] = np.concatenate(\n                    (self.nsky[nc][nap],other.nsky[nc][nap]))\n                self.nrej[nc][nap] = np.concatenate(\n                    (self.nrej[nc][nap],other.nrej[nc][nap]))\n                self.worst[nc][nap] = np.concatenate(\n                    (self.worst[nc][nap],other.worst[nc][nap]))\n                self.eflag[nc][nap] = np.concatenate(\n                    (self.eflag[nc][nap],other.eflag[nc][nap]))\n        return self\n\n    def tseries(self, nccd, naper, ttype='c', wmax=50, emax=5):\n        \"\"\"\n        Return a time series as a Dset from an Ulog. The exposure\n        times are stored in the \"errors\" assigned to the X axis\n        (after division by 2 so that +/- the error = exposure time).\n\n        nccd : int\n            CCD number\n\n        naper : int\n            aperture number (starting from 1)\n\n        ttype : string\n            'c' counts, 'xm' measured x, 'ym' measured y, 'f' fwhm\n\n        wmax : int\n            maximum bad pixel in aperture above which data will be\n            flagged as bad (only applies to aperture data).\n\n        emax : int\n            maximum error flag, above which data will be flagged as\n            bad (only applies to aperture data).\n        \"\"\"\n        if nccd not in self.utc:\n            raise UlogError('Ulog.tseries: nccd = ' + str(nccd) +\n                            ' not found.')\n        if naper < 1 or naper > len(self.x[nccd]):\n            raise UlogError('Ulog.tseries: naper = ' + str(naper) +\n                            ' not found in CCD = ' + str(nccd))\n        if ttype != 'c' and ttype != 'xm' and ttype != 'ym' and ttype != 'f':\n            raise UlogError('Ulog.tseries: ttype = ' + ttype +\n                            ' is not a recognised time series type.')\n\n        nap = naper - 1\n\n        # construct the Dset. 
First the x-axis, common to all.\n        xaxis = Axis('MJD (UTC)', 'days', self.utc[nccd],\n                     self.expose[nccd]/86400./2., self.tflag[nccd])\n\n        # common header items\n        head = fits.Header()\n        head['TYPE'] = ('time series', 'Type of data')\n        head['INSTRUME'] = ('ULTRACAM', 'Instrument')\n        head['NCCD'] = (nccd, 'CCD number')\n\n        # next the yaxis and data mask arrays\n        if ttype == 'c':\n            ye = self.sigma[nccd][nap].copy()\n            ygood = ye > 0.\n            y = self.counts[nccd][nap].copy()\n            y[ygood] /= self.expose[nccd][ygood]\n            ye[ygood] /= self.expose[nccd][ygood]\n            yaxis = Axis('Count rate', 'cts/sec', y, ye, ygood)\n\n            good = (self.worst[nccd][nap] <= wmax) & \\\n                   (self.eflag[nccd][nap] <= emax)\n\n            head['NAPERTUR'] = (naper, 'Aperture number')\n\n        elif ttype == 'xm':\n            yaxis = Axis('X position', 'pixels', self.xm[nccd][nap],\n                         self.exm[nccd][nap])\n            good = (self.worst[nccd][nap] <= wmax) & \\\n                   (self.eflag[nccd][nap] <= emax)\n\n            head['NAPERTUR'] = (naper, 'Aperture number')\n\n        elif ttype == 'ym':\n            yaxis = Axis('Y position', 'pixels', self.ym[nccd][nap],\n                         self.eym[nccd][nap])\n            good = (self.worst[nccd][nap] <= wmax) & \\\n                   (self.eflag[nccd][nap] <= emax)\n\n            head['NAPERTUR'] = (naper, 'Aperture number')\n\n        elif ttype == 'f':\n            yaxis = Axis('FWHM seeing', 'pixels', self.fwhm[nccd])\n            good = (self.worst[nccd][nap] <= wmax) & \\\n                   (self.eflag[nccd][nap] <= emax)\n\n        # create Dset\n        dset = Dset(xaxis, yaxis, head, good)\n\n        return dset\n\n# Exception class\nclass UlogError(DnlError):\n    \"\"\"For throwing exceptions from the dnl.ulog module\"\"\"\n    pass\n\n", "sub_path": "old/trm/dnl/ulog.py", "file_name": "ulog.py", "file_ext": "py", "file_size_in_byte": 27289, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.asarray", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 153, "usage_type": "attribute"}, {"api_name": "core.Dset", "line_number": 166, "usage_type": "call"}, {"api_name": "core.Axis", "line_number": 166, "usage_type": "call"}, {"api_name": "core.Axis", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 395, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 396, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 397, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 398, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 399, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 402, "usage_type": 
"call"}, {"api_name": "numpy.float32", "line_number": 403, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 405, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 407, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 409, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 410, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 411, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 413, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 414, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 415, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 417, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 419, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 420, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 421, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 423, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 425, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 426, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 427, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 432, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 432, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 465, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 467, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 475, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 478, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 482, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 484, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 488, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 560, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 561, 
"usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 563, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 564, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 573, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 575, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 579, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 581, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 585, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 591, "usage_type": "call"}, {"api_name": "core.Axis", "line_number": 631, "usage_type": "call"}, {"api_name": "astropy.io.fits.Header", "line_number": 635, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 635, "usage_type": "name"}, {"api_name": "core.Axis", "line_number": 647, "usage_type": "call"}, {"api_name": "core.Axis", "line_number": 655, "usage_type": "call"}, {"api_name": "core.Axis", "line_number": 663, "usage_type": "call"}, {"api_name": "core.Axis", "line_number": 671, "usage_type": "call"}, {"api_name": "core.Dset", "line_number": 676, "usage_type": "call"}, {"api_name": "core.DnlError", "line_number": 681, "usage_type": "name"}]} +{"seq_id": "297817909", "text": "\"\"\"Run cplex experiments.\"\"\"\n\n\n# Imports\nfrom experiments import (\n logger,\n SNAP_DATA_DIR,\n SNAP_DATA_EXT,\n PREPROCESSING_TIMEOUTS\n)\nfrom experiments.datasets import preprocessed\nfrom experiments.heuristic import (\n CPLEX_RESULTS_DATA_FILE,\n HEURISTICS_CSV_HEADERS\n)\nfrom src.ilp.solver import solve, read_edgelist\nfrom itertools import product\nimport csv\nimport os\n\n\ndef main():\n \"\"\"Run experiments\"\"\"\n\n # Compute datasets\n datasets = list(map(\n lambda d: str(SNAP_DATA_DIR / (d + SNAP_DATA_EXT)),\n preprocessed\n ))\n\n # Log\n logger.info('Starting CPLEX Experiment')\n\n # Generate experiments\n experiments = product(PREPROCESSING_TIMEOUTS, datasets)\n\n # Open output file\n with open(str(CPLEX_RESULTS_DATA_FILE), 'w') as output:\n\n # Get writer\n writer = csv.writer(output)\n\n # Write header\n writer.writerow(HEURISTICS_CSV_HEADERS)\n\n # Run experiments\n for experiment in experiments:\n\n try:\n\n # Log\n logger.info(\n 'Starting experiment timeout={} dataset={}'\n .format(*experiment)\n )\n\n # Run cplex\n solution = solve(\n read_edgelist(experiment[1]),\n formulation='VC',\n solver='CPLEX',\n threads=4,\n timelimit=experiment[0],\n convert_to_oct=True\n )\n\n # Write\n writer.writerow([\n os.path.basename(experiment[1]),\n experiment[0],\n solution.opt,\n solution.time,\n solution.certificate\n ])\n output.flush()\n\n except Exception as e:\n\n logger.error(e)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "experiments/heuristic/cplex.py", "file_name": "cplex.py", "file_ext": "py", "file_size_in_byte": 1916, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "experiments.datasets.preprocessed", "line_number": 28, "usage_type": "argument"}, {"api_name": "experiments.SNAP_DATA_DIR", "line_number": 27, "usage_type": "name"}, {"api_name": "experiments.SNAP_DATA_EXT", "line_number": 27, "usage_type": "name"}, {"api_name": "experiments.logger.info", "line_number": 32, "usage_type": "call"}, {"api_name": "experiments.logger", "line_number": 32, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 35, "usage_type": "call"}, {"api_name": "experiments.PREPROCESSING_TIMEOUTS", "line_number": 35, "usage_type": "argument"}, {"api_name": "experiments.heuristic.CPLEX_RESULTS_DATA_FILE", "line_number": 38, "usage_type": "argument"}, {"api_name": "csv.writer", "line_number": 41, "usage_type": "call"}, {"api_name": "experiments.heuristic.HEURISTICS_CSV_HEADERS", "line_number": 44, "usage_type": "argument"}, {"api_name": "experiments.logger.info", "line_number": 52, "usage_type": "call"}, {"api_name": "experiments.logger", "line_number": 52, "usage_type": "name"}, {"api_name": "src.ilp.solver.solve", "line_number": 58, "usage_type": "call"}, {"api_name": "src.ilp.solver.read_edgelist", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "experiments.logger.error", "line_number": 79, "usage_type": "call"}, {"api_name": "experiments.logger", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "211302682", "text": "# import pandas\nimport matplotlib.pyplot as plt\nimport numpy\nfrom scipy.stats import norm\nimport math\n\ndef plot2(k,r,q,vol,t, step = 0.02, maxprice = 100):\n\n\tsspace = numpy.arange(step, maxprice+step, step)\n\n\tdelta = []\n\tgamma = []\n\ttheta = []\n\tvega = []\n\n\tfor s0 in sspace:\n\t\td1 = math.log(s0/k) + (r-q+(vol**2)/2)*t\n\t\td1 = d1/(vol*math.sqrt(t))\t\t\n\t\td2 = d1 - vol*math.sqrt(t)\n\n\t\ttempdelta = math.exp(-1*q*t)*norm.cdf(d1);\n\t\ttempgamma = norm.pdf(d1)*math.exp(-1*q*t)/(s0*vol*math.sqrt(t))\n\n\t\tterm1 = s0*norm.pdf(d1)*vol*math.exp(-1*q*t)/(2*math.sqrt(t))\n\t\tterm2 = q*s0*norm.cdf(d1)*math.exp(-1*q*t)\n\t\tterm3 = r*k*math.exp(-1*r*t)*norm.cdf(d2)\t\n\t\ttemptheta = -1*term1 + term2 - term3\n\t\t\n\t\ttempvega = s0*math.sqrt(t)*norm.pdf(d1)*math.exp(-1*q*t)\n\t\t\n\t\tgamma.append(tempgamma)\n\t\tdelta.append(tempdelta)\n\t\ttheta.append(temptheta)\n\t\tvega.append(tempvega)\n\n\tfig = plt.figure()\n\tfig.suptitle(\"Variation of Greeks with Stock Price\", fontsize=16)\n\t\n\tax = plt.subplot(221)\n\tax.plot(sspace, delta, color = \"black\")\t\n\tax.set_ylabel(\"Delta\", fontsize = 13); ax.set_xlabel(\"Stock Price ($)\", fontsize = 13)\n\t\n\tax = plt.subplot(222)\n\tax.plot(sspace, gamma, color = \"black\")\t\n\tax.set_ylabel(\"Gamma\", fontsize = 13); ax.set_xlabel(\"Stock Price ($)\", fontsize = 13)\n\t\n\tax = plt.subplot(223)\n\tax.plot(sspace, theta, color = \"black\")\t\t\n\tax.set_ylabel(\"Theta\", fontsize = 13); ax.set_xlabel(\"Stock Price ($)\", fontsize = 13)\n\t\n\tax = plt.subplot(224)\n\tax.plot(sspace, vega, color = \"black\")\t\n\tax.set_ylabel(\"Vega\", fontsize = 13); ax.set_xlabel(\"Stock Price ($)\", fontsize = 13)\n\n\tfig.subplots_adjust(top=0.90, bottom=0.12, left=0.12, right=0.95, hspace=0.45,\n wspace=0.35)\n\n\tplt.savefig(\"./Q2.png\")\n\t\n\t\n\n\nplot2(k = 50, r = 0.1, q = 0, vol = 0.3, t = 1)\n\n\n", "sub_path": "q2.py", 
"file_name": "q2.py", "file_ext": "py", "file_size_in_byte": 1754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.arange", "line_number": 9, "usage_type": "call"}, {"api_name": "math.log", "line_number": 17, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 18, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 21, "usage_type": "name"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 22, "usage_type": "name"}, {"api_name": "math.exp", "line_number": 22, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 24, "usage_type": "name"}, {"api_name": "math.exp", "line_number": 24, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 25, "usage_type": "name"}, {"api_name": "math.exp", "line_number": 25, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 26, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 29, "usage_type": "name"}, {"api_name": "math.exp", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "116896679", "text": "from django.urls import path\nfrom . 
import views\n\napp_name = \"web_app\"\nurlpatterns = [\n path('login/', views.login, name='login'),\n path('register/', views.register, name='register'),\n path('', views.index, name='index'),\n path('logout/', views.logout, name='logout'),\n path('mon_compte/', views.my_account, name='my_account'),\n path('results//', views.results, name='results'),\n path('details//', views.details, name='details'),\n path('save/', views.saveproduct, name='saveproduct'),\n path('ajax/save/', views.saveproduct, name='save'),\n path('ajax/delete/', views.delete_prod, name='delete'),\n path('my_favs/', views.my_favs, name='my_favs'),\n path('legals/', views.legals, name='legals'),\n\n\n]\n\n", "sub_path": "web_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 751, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "333189550", "text": "# Retrieve N2 Authors\r\n# By: Eric Livingston (e.livingston@Elsevier.com)\r\n# Copyright © 2017 Elsevier B.V. All rights reserved.\r\n\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are\r\n# met:\r\n\r\n# 1.\tRedistributions of source code must retain the above copyright notice,\r\n# \tthis list of conditions and the following disclaimer.\r\n\r\n# 2.\tRedistributions in binary form must reproduce the above copyright\r\n# \tnotice, this list of conditions and the following disclaimer in the\r\n# \tdocumentation and/or other materials provided with the distribution.\r\n\r\n# Neither the name of the copyright holder nor the names of its\r\n# contributors may be used to endorse or promote products derived from\r\n# this software without specific prior written permission.\r\n\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\r\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\r\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\r\n# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\r\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\r\n# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\r\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\r\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\r\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n\r\nimport json\r\nimport fileinput\r\nimport ElsevierClient as Elsevier\r\nfrom sortedcontainers import SortedList\r\nfrom sortedcontainers import SortedDict\r\n\r\nAUTH_FILE = \"Output\\\\authors_scopus.json\"\r\nN2_PUBS = \"Output\\\\n2_pubs_scopus.json\"\r\nScopus = Elsevier.PubClient()\r\n\r\n# Little Class that helps format output of publications\r\nclass Output:\r\n def __init__(s, name, mode='ab'):\r\n s.out = open(name, mode)\r\n\r\n def write(s, data):\r\n s.out.write((json.dumps(data) + '\\n').encode('UTF-8'))\r\n\r\n def flush(s):\r\n s.out.flush()\r\n\r\n def close(s):\r\n s.out.close()\r\n\r\nprint(\"Begin\")\r\n\r\n# Load Authors\r\nAuthIDs = SortedList()\r\nAuthCount = 0\r\ntry:\r\n with fileinput.input(files=(AUTH_FILE), openhook=fileinput.hook_encoded(\"utf-8\")) as f:\r\n for line in f:\r\n AuthCount += 1\r\n if not (AuthCount % 1000): print(\"Loaded {} Authors\".format(AuthCount))\r\n data = json.loads(line)\r\n auth = Elsevier.Author(data)\r\n if auth.SID not in AuthIDs: AuthIDs.add(auth.SID)\r\n print(\"{} Authors Loaded\".format(AuthCount))\r\nexcept: pass\r\n\r\nPubIDs = SortedList()\r\nAuthOut = Output(AUTH_FILE)\r\n\r\n# Load Core N1 Pubs\r\nN2_Count = 0\r\nDone = True\r\nwith fileinput.input(files=(N2_PUBS), openhook=fileinput.hook_encoded(\"utf-8\")) as f:\r\n for line in f:\r\n N2_Count += 1\r\n print(\"Processing Publication {}\".format(N2_Count))\r\n line = line.strip()\r\n try:\r\n data = json.loads(line)\r\n pub = Elsevier.ScopusPublication(data)\r\n if pub.SID not in PubIDs:\r\n PubIDs.add(pub.SID)\r\n if pub.Authors:\r\n AuthTotal = len(pub.Authors)\r\n AuthCount = 0\r\n for auth in pub.Authors: # Get publication Author\r\n AuthCount += 1\r\n if auth.SID not in AuthIDs: # Switch to retrieved Author (more data)\r\n print(\"Processing Author {}/{} - {}, {}\".format(AuthCount, AuthTotal, auth.LastName, auth.FirstName))\r\n try:\r\n auths = Scopus.get_author(auth.SID, include_raw=True)\r\n if auths:\r\n auth = auths[0]\r\n if auth.SID not in AuthIDs: # Might be re-assigned on a redirect\r\n Done = False\r\n AuthOut.write(auth.Raw)\r\n AuthOut.flush() # In case of future errors, ensure we save this out\r\n AuthIDs.add(auth.SID)\r\n else: print(\"Already Processed {}\".format(auth.LastName))\r\n except:\r\n print(\"Error retrieving author, skipping...\")\r\n Done = False\r\n else: print(\"Already Processed {}\".format(auth.LastName))\r\n else: print(\"No Authors listed for SID:{}\".format(pub.SID))\r\n else: print(\"Redundant Publication {}\".format(pub.SID))\r\n except Exception as e:\r\n print(\"Bad Input Line: {} ({})\".format(N2_Count, e))\r\n Done = False\r\nprint(\"{} Core Publications Loaded\".format(len(PubIDs)))\r\n\r\nAuthOut.close()\r\nprint(\"Done: {}\".format(Done))\r\n", "sub_path": "Bright_Stars/Python/04 - Retrieve N2 Authors.py", "file_name": "04 - Retrieve N2 Authors.py", "file_ext": "py", "file_size_in_byte": 5036, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "59", "api": [{"api_name": "ElsevierClient.PubClient", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedList", "line_number": 59, "usage_type": "call"}, {"api_name": "fileinput.input", "line_number": 62, "usage_type": "call"}, {"api_name": "fileinput.hook_encoded", "line_number": 62, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "ElsevierClient.Author", "line_number": 67, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedList", "line_number": 72, "usage_type": "call"}, {"api_name": "fileinput.input", "line_number": 78, "usage_type": "call"}, {"api_name": "fileinput.hook_encoded", "line_number": 78, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "ElsevierClient.ScopusPublication", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "125874967", "text": "#2007-04-1 RJ Marsan\n#Pylaga\n#Original: 2007-02-20 Derek Mcdonald \n#Subclass of pylaga.py\n#################################################################################################################\n#\n#\tThe All Important Enemy class, and its manager EnemyManager\n#\n#\n#\n#\n#import pygame os and sys libraries\nimport pygame, os, sys, math, random\nimport globalvars\nfrom bullet import EnemyBullet\n\n\n#####################\nclass EnemyManager(pygame.sprite.RenderUpdates):\n\tdef __init__(self):\n\t\tpygame.sprite.RenderUpdates.__init__(self)\n\t\tself.asdf=0\n\t\tself.transition_speed=5\n\t\tself.transition_time=150/self.transition_speed\n\t\tself.current_transition=0\n\tdef shoot(self,shotslist):\n\t\tself.asdf=random.randint(0,globalvars.enemy_bullet_odds)\n\t\tif self.asdf < len(self):\n\t\t\tself.sprites()[self.asdf].shoot(shotslist)\n\t\n\tdef update(self):\n\t\tif self.current_transitions not ==s and !=s... 
terrible programming.\n\t\tif transition_speed > 0:\n\t\t\tself.rect.bottom+=transition_speed\n\t\telif self.envel <= 0:\n\t\t\tif self.rect.left < self.en_xmax:\n\t\t\t\tself.rect.right+=self.enspeed\n\t\t\telif self.rect.left >= self.en_xmax:\n\t\t\t\tself.envel = 1\n\t\telse:\n\t\t\tif self.rect.left > self.en_xmin:\t\n\t\t\t\tself.rect.right+=((-1)*self.enspeed)\n\t\t\telif self.rect.left <= self.en_xmin:\n\t\t\t\tself.envel = 0\n\t\tself.next_state()\n\t\t\t\n\t#-1 is normal, 0 is exploding, up to 4 are the animations for it\n\tdef set_state(self, varr):\n\t\tself.en_state=varr\n\t\t\n\tdef next_state(self):\n\t\tif self.en_state>=0 and self.en_state<5:\n\t\t\tself.image=globalvars.explosions[self.en_state]\n\t\t\tself.en_state+=1\n\t\telif self.en_state>4:\n\t\t\tself.parent.remove(self)\n\t\n\t#return the state\n\tdef get_state(self):\n\t\treturn self.en_state\n\t\n\tdef shoot(self,shotslist):\n\t\ttempb=EnemyBullet(shotslist)\n\t\ttempb.set_pos(self.rect.left+self.rect.width/2,self.rect.bottom)\n\t\tshotslist.add(tempb)\n###################\n\n\n", "sub_path": "enemy.py", "file_name": "enemy.py", "file_ext": "py", "file_size_in_byte": 3227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pygame.sprite", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.sprite.RenderUpdates.__init__", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 21, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "globalvars.enemy_bullet_odds", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 55, "usage_type": "attribute"}, {"api_name": "globalvars.init_enemy_speed", "line_number": 56, "usage_type": "attribute"}, {"api_name": "globalvars.xmax", "line_number": 58, "usage_type": "attribute"}, {"api_name": "globalvars.xmin", "line_number": 59, "usage_type": "attribute"}, {"api_name": "globalvars.enemyship", "line_number": 61, "usage_type": "attribute"}, {"api_name": "globalvars.explosions", "line_number": 103, "usage_type": "attribute"}, {"api_name": "bullet.EnemyBullet", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "324504596", "text": "import json\nimport network3\nfrom network3 import Network\nfrom network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer\nimport theano\nimport theano.tensor as T\nimport random\nimport numpy as np\n\ndef ReLU(z): return T.maximum(0.0, z)\n\ntraining_data, validation_data, test_data = network3.load_data_shared()\n#training_data, validation_data, test_data = network3.load_data_shared('../data/mnist_expanded.pkl.gz')\n\n# Hyperparamters\nmini_batch_size = 10\nnum_epochs = 30\neta = 0.03\nfilename = \"data\"\nlmbda = 0\np_dropout = 0.16666666666667\n\n# Set seed to facilitate reproducibility\nrandom.seed(12345678)\nnp.random.seed(12345678)\n\n# Build network\nnet = Network(\n [\n ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),\n filter_shape=(20, 1, 5, 5),\n poolsize=(2, 2),\n activation_fn=ReLU),\n ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),\n filter_shape=(20, 20, 5, 5),\n poolsize=(2, 2),\n activation_fn=ReLU),\n FullyConnectedLayer(n_in=20*4*4, n_out=30, activation_fn=ReLU, p_dropout=p_dropout),\n FullyConnectedLayer(n_in=30, n_out=30, 
activation_fn=ReLU, p_dropout=p_dropout), \n SoftmaxLayer(n_in=30, n_out=10, p_dropout=p_dropout)\n ],\n mini_batch_size)\n\n# Call SGD\ntraining_accuracy, validation_accuracy = net.SGD(\n training_data,\n num_epochs,\n mini_batch_size,\n eta,\n validation_data,\n test_data,\n lmbda=lmbda,\n monitor_data=True)\n\n# Save data on file\nf = open(filename + \".txt\", \"w\")\njson.dump([training_accuracy, validation_accuracy], f)\nf.close();\n", "sub_path": "src/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "theano.tensor.maximum", "line_number": 10, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 10, "usage_type": "name"}, {"api_name": "network3.load_data_shared", "line_number": 12, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "network3.Network", "line_number": 28, "usage_type": "call"}, {"api_name": "network3.ConvPoolLayer", "line_number": 30, "usage_type": "call"}, {"api_name": "network3.ConvPoolLayer", "line_number": 34, "usage_type": "call"}, {"api_name": "network3.FullyConnectedLayer", "line_number": 38, "usage_type": "call"}, {"api_name": "network3.FullyConnectedLayer", "line_number": 39, "usage_type": "call"}, {"api_name": "network3.SoftmaxLayer", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "446585016", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 9 16:57:46 2019\n\n@author: lavanyasingh\n\"\"\"\n\nimport csv\nfrom matplotlib import pyplot as plt\n\nclass Viz_Maker:\n\n def __init__(self, infile=\"data/all_raw_cleaned.csv\"):\n self.infile=infile\n \n def read_in(self):\n sources = []\n with open(self.infile, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for line in reader: \n sources.append([line[1], line[7].replace(\"newscrawls\", \"newscrawl\").split(\" \")])\n print(\"DONE READING\") \n return sources\n \n def ms_bar_chart(self):\n sources = self.read_in()\n data = {}\n for source in sources:\n metasources = source[1]\n for ms in metasources:\n if ms != \"\": \n try:\n data[ms] += 1\n except KeyError:\n data.update({ms:1})\n datalist = [(val, key) for key, val in data.items()]\n print(datalist)\n \n top = sorted(datalist, reverse=True)\n \n y = [element[1] for element in top]\n x = [element[0] for element in top]\n \n plt.bar(y, x, align='center', alpha=1, color=\"#141d99\")\n plt.xlabel('Metasources', fontsize=15)\n plt.ylabel('Number of Sources', fontsize = 15)\n plt.xticks(y, y, rotation='vertical')\n plt.box(False)\n plt.tick_params(axis='both', length = 0)\n plt.locator_params(axis='y', nbins=4)\n plt.title(\"World News Project Metasources\", fontsize=18)\n plt.tight_layout()\n plt.show()\n \nif __name__ == '__main__':\n viz_maker = Viz_Maker()\n viz_maker.ms_bar_chart()\n #clean_ms()", "sub_path": "visualizations/viz_metasources.py", "file_name": "viz_metasources.py", "file_ext": "py", "file_size_in_byte": 1771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "csv.reader", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 45, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.box", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.locator_params", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "110263228", "text": "import click\nimport json\nimport re\nimport io\nimport os\nimport signal\nimport IPython\nimport pandas as pd\n\nfrom ..command_base import CommandCategory, Command\nfrom ..coordinator import Coordinator\nfrom ..structure import Element, ContentType, Structure, micra_content_types, micra_structures\nfrom ..error import MicraQuit\nfrom moda.style import Styleds, CustomStyled, Format\nfrom typing import List, Set, TypeVar, Generic, Callable\nfrom enum import Enum\nfrom redis import Redis\nfrom pprint import pformat\n\n# ---------------------------------------------------------------------------\n# Enums\n# ---------------------------------------------------------------------------\n\nclass ElementTarget(Enum):\n content_type = 'types'\n structure = 'structures'\n\n def get_elements(self, redis: Redis) -> List[Element]:\n if self is ElementTarget.content_type:\n return [ContentType.from_dict(json.loads(v)) for v in redis.hgetall(micra_content_types.key).values()]\n elif self is ElementTarget.structure:\n return [Structure.from_dict(json.loads(v)) for v in redis.hgetall(micra_structures.key).values()]\n\nclass OutputFormat(Enum):\n string = 'string'\n pretty = 'pretty'\n json = 'json'\n csv = 'csv'\n dataframe = 'dataframe'\n\n def format(self, items: List[any], redis: Redis):\n items_are_structures = True\n for item in items:\n # TODO handle non-Structure items as necessary\n if not isinstance(item, Structure):\n items_are_structures = False\n break\n\n if self is OutputFormat.string:\n if items_are_structures:\n return '\\n\\n'.join(f'{s.identifier} {s.display_metadata(redis=redis)}\\n{\"—\" * len(s.identifier)}\\n{s.display_detail(redis=redis)}' for s in items)\n else:\n return '\\n'.join(str(i) for i in items)\n elif self is OutputFormat.pretty:\n if items_are_structures:\n return '\\n'.join(s.display_content(redis=redis) for s in items)\n else:\n return '\\n\\n'.join(pformat(item) for i in items)\n elif self is OutputFormat.json:\n if items_are_structures:\n # 
items = [\n # {\n # **json.loads(item.get_data_frame(redis=redis).to_json(orient='table')),\n # 'identifier': item.identifier,\n # }\n # for item in items\n # ]\n json_items = []\n for item in items:\n data_frame = item.get_data_frame(redis=redis)\n if 'level_0' in data_frame.columns:\n data_frame.drop(['level_0'], axis=1, inplace=True)\n json_items.append({\n **json.loads(data_frame.to_json(orient='table')),\n 'identifier': item.identifier,\n })\n return json.dumps(json_items)\n elif self is OutputFormat.csv:\n if items_are_structures:\n df = pd.DataFrame()\n for structure in items:\n structure_df = structure.get_data_frame(redis=redis)\n structure_df['identifier'] = structure.identifier\n df = df.append(structure_df)\n # df = df.reindex(sorted(df.columns), axis=1)\n else:\n df = pd.DataFrame([{'item': i} for i in items])\n buf = io.StringIO()\n df.to_csv(buf)\n return buf.getvalue()\n elif self is OutputFormat.dataframe:\n if items_are_structures:\n dfs = {item.identifier: item.get_data_frame(redis=redis) for item in items}\n else:\n dfs = {\n 'all': pd.DataFrame([{'item': item} for item in items])\n }\n local_ns = {'dfs': dfs}\n if len(dfs) == 1:\n local_ns['df'] = list(dfs.values())[0]\n console = IPython.terminal.embed.InteractiveShellEmbed()\n console.mainloop(local_ns=local_ns)\n return None\n\n# ---------------------------------------------------------------------------\n# Decorators\n# ---------------------------------------------------------------------------\n\nformat_command_option = click.option('-f', '--format', 'format_value', type=click.Choice([f.value for f in OutputFormat]), default=OutputFormat.string.value)\npublish_command_option = click.option('-p', '--publish', 'publish', type=str, multiple=True)\necho_command_option = click.option('-e', '--echo', 'should_echo', is_flag=True)\n\n# ---------------------------------------------------------------------------\n# Commands\n# ---------------------------------------------------------------------------\n\nC = TypeVar('C', bound=Coordinator)\nclass CoordinatorCommand(Generic[C], Command[C]):\n pass\n\nclass StartCommand(CoordinatorCommand[CoordinatorCommand]):\n _all_names: List[str]\n\n def __init__(self, all_names: List[str], context: CoordinatorCommand):\n self._all_names = all_names\n super().__init__(context=context)\n\n @property\n def name(self) -> str:\n return self._all_names[0]\n\n @property\n def aliases(self) -> List[str]:\n return self._all_names[1:]\n\n @property\n def click_command(self) -> click.Command:\n @click.command(name=self.name)\n def start():\n self.context.start()\n return start\n\n @property\n def can_run(self) -> bool:\n return not self.context.running\n\nclass QuitCommand(CoordinatorCommand[Coordinator]):\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.caution\n\n @property\n def name(self) -> str:\n return 'quit'\n\n @property\n def aliases(self) -> List[str]:\n return ['q']\n\n @property\n def click_command(self) -> click.Command:\n @click.command(name=self.name, help='Quit µ')\n def click_command():\n raise MicraQuit()\n\n return click_command\n\n @property\n def can_run(self) -> bool:\n return self.context.running\n\nclass OutputCommand(Generic[C], CoordinatorCommand[C]):\n @property\n def micra_decorators(self) -> List[Callable[[Callable[..., any]], Callable[..., any]]]:\n return [\n self.publish_command_output,\n self.format_command_output,\n ]\n\n @property\n def click_decorators(self) -> List[Callable[[Callable[..., any]], Callable[..., any]]]:\n return [\n 
publish_command_option,\n echo_command_option,\n format_command_option,\n ]\n\n def format_command_output(self, f: Callable[..., any]) -> Callable[..., any]:\n def wrapped(*args, format_value: str, **kwargs):\n format = OutputFormat(format_value)\n items = f(*args, **kwargs)\n if items is None:\n return ''\n return format.format(items=items, redis=self.context.redis)\n\n return wrapped\n\n def publish_command_output(self, f: Callable[..., any]) -> Callable[..., any]:\n def wrapped(*args, publish: List[str], should_echo: bool, **kwargs):\n result = f(*args, **kwargs)\n for channel in publish:\n subscriber_count = self.context.redis.publish(channel, result)\n if should_echo:\n print(f'Sent response to {subscriber_count} subscribers.')\n if not publish or should_echo:\n print(result)\n\n return wrapped\n\nclass StatusCommand(OutputCommand[Coordinator]):\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.info\n\n @property\n def name(self) -> str:\n return 'status'\n\n @property\n def aliases(self) -> List[str]:\n return ['s']\n\n @property\n def click_command(self) -> click.Command:\n @click.command()\n @self.decorate\n def status():\n return self.context.status_items\n return status\n\nclass ListCommand(OutputCommand[Coordinator]):\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.info\n\n @property\n def name(self) -> str:\n return 'list'\n\n @property\n def aliases(self) -> List[str]:\n return ['ls']\n\n @property\n def click_command(self) -> click.Command:\n @click.command(name=self.name)\n @click.option('-t', '--target', 'targets', type=click.Choice([t.value for t in ElementTarget]), multiple=True)\n @self.decorate\n def click_command(targets):\n elements = [\n e\n for t in ElementTarget if not targets or t.value in targets\n for e in t.get_elements(redis=self.context.redis)\n ]\n return [Styleds(parts=[\n CustomStyled(e.display_name, Format().cyan()),\n CustomStyled(f' {e.display_summary}', Format().blue()),\n ]).styled for e in elements]\n\n return click_command\n\nclass ViewCommand(OutputCommand[Coordinator]):\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.info\n\n @property\n def name(self) -> str:\n return 'view'\n\n @property\n def aliases(self) -> List[str]:\n return ['v']\n\n @property\n def click_command(self) -> click.Command:\n @click.command(name=self.name)\n @click.option('-s', '--structure-id', 'ids', help='Filter output by structure IDs.', multiple=True)\n @click.option('-t', '--tag', 'tags', help='Filter output by tags.', multiple=True)\n @self.decorate\n def click_command(ids: List[str], tags: List[str]):\n id_regexes = list(map(re.compile, ids))\n def key_matches(keys: Set[str], regexes: List[re.Pattern]):\n for key in keys:\n for regex in regexes:\n if regex.match(key): return True\n return False\n\n filtered_keys = [\n k\n for k in self.context.redis.hkeys(micra_structures.key)\n if not ids or key_matches(keys={k}, regexes=id_regexes)\n ]\n if not filtered_keys:\n return []\n\n structures = [\n Structure.from_dict(json.loads(v))\n for v in self.context.redis.hmget(micra_structures.key, filtered_keys)\n ]\n tag_regexes = list(map(re.compile, tags))\n structures = list(filter(lambda s: not tags or key_matches(keys=s.tags, regexes=tag_regexes), structures))\n return structures\n\n return click_command\n\nclass SubprocessCommand(CoordinatorCommand[Coordinator]):\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.caution\n\n @property\n def name(self) -> str:\n 
return 'subprocess'\n\n @property\n def click_command(self) -> click.Command:\n @click.group(name=self.name)\n @click.argument('pid', type=int)\n @click.pass_context\n def click_command(ctx: any, pid:int):\n ctx.obj = pid\n\n @click_command.command(name='set')\n @click.argument('command')\n @click.pass_obj\n def set_command(pid: int, command: str):\n self.context.add_subprocess(pid=pid, command=command)\n\n @click_command.command()\n @click.pass_obj\n def clear(pid: int):\n self.context.remove_subprocess(pid=pid)\n\n @click_command.command()\n @click.pass_obj\n def terminate(pid: int):\n try:\n os.kill(pid, signal.SIGTERM)\n except ProcessLookupError:\n pass\n\n @click_command.command()\n @click.pass_obj\n def kill(pid: int):\n try:\n os.kill(pid, signal.SIGKILL)\n except ProcessLookupError:\n pass\n\n return click_command\n\n @property\n def can_run(self) -> bool:\n return self.context.running\n\nclass MessageCommand(CoordinatorCommand[Coordinator]):\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.caution\n\n @property\n def name(self) -> str:\n return 'message'\n\n @property\n def click_command(self) -> click.Command:\n @click.group(name=self.name)\n @click.argument('key')\n @click.pass_context\n def click_command(ctx: any, key:str):\n ctx.obj = key\n\n @click_command.command(name='set')\n @click.argument('message')\n @click.pass_obj\n def set_command(key: str, message: str):\n self.context.add_message(key=key, message=message)\n\n @click_command.command()\n @click.pass_obj\n def clear(key: str):\n self.context.remove_message(key=key)\n\n return click_command\n\nclass ListenCommand(OutputCommand[Coordinator]):\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.caution\n\n @property\n def name(self) -> str:\n return 'listen'\n\n @property\n def click_command(self) -> click.Command:\n @click.group(name=self.name)\n def click_command():\n pass\n\n @click_command.command()\n @click.argument('listener')\n @click.argument('starter_args', nargs=-1)\n def start(listener: str, starter_args: List[str]):\n self.context.listener_starters[listener](*starter_args)\n\n @click_command.command()\n @click.argument('thread_id', type=int)\n def stop(thread_id: int):\n self.context.stop_listener(thread_id=thread_id)\n\n @click_command.command(name='list')\n @self.decorate\n def list_command():\n return list(sorted(self.context.listener_starters.keys()))\n\n return click_command\n\n @property\n def can_run(self) -> bool:\n return self.context.running\n\nclass ForwardCommand(CoordinatorCommand[Coordinator]):\n _name: str\n key: str\n\n def __init__(self, name: str, key: str, context: CoordinatorCommand):\n self._name = name\n self.key = key\n super().__init__(context=context)\n\n @property\n def category(self) -> CommandCategory:\n return CommandCategory.caution\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def click_command(self) -> click.Command:\n @click.command(name=self.name)\n @click.argument('forward_args', nargs=-1)\n @click.pass_context\n def click_command(ctx: any, forward_args: List[str]):\n self.context.redis.lpush(self.key, self.quote_command(command_args=forward_args))\n\n return click_command\n\n @property\n def can_run(self) -> bool:\n return self.context.running\n", "sub_path": "micra_store/command/coordinator_commands.py", "file_name": "coordinator_commands.py", "file_ext": "py", "file_size_in_byte": 13163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", 
"api": [{"api_name": "enum.Enum", "line_number": 24, "usage_type": "name"}, {"api_name": "redis.Redis", "line_number": 28, "usage_type": "name"}, {"api_name": "structure.ContentType.from_dict", "line_number": 30, "usage_type": "call"}, {"api_name": "structure.ContentType", "line_number": 30, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}, {"api_name": "redis.hgetall", "line_number": 30, "usage_type": "call"}, {"api_name": "structure.micra_content_types.key", "line_number": 30, "usage_type": "attribute"}, {"api_name": "structure.micra_content_types", "line_number": 30, "usage_type": "name"}, {"api_name": "structure.Structure.from_dict", "line_number": 32, "usage_type": "call"}, {"api_name": "structure.Structure", "line_number": 32, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "redis.hgetall", "line_number": 32, "usage_type": "call"}, {"api_name": "structure.micra_structures.key", "line_number": 32, "usage_type": "attribute"}, {"api_name": "structure.micra_structures", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "structure.Element", "line_number": 28, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "redis.Redis", "line_number": 41, "usage_type": "name"}, {"api_name": "structure.Structure", "line_number": 45, "usage_type": "argument"}, {"api_name": "pprint.pformat", "line_number": 58, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 80, "usage_type": "call"}, {"api_name": "structure.get_data_frame", "line_number": 82, "usage_type": "call"}, {"api_name": "structure.identifier", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 87, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 96, "usage_type": "call"}, {"api_name": "IPython.terminal.embed.InteractiveShellEmbed", "line_number": 101, "usage_type": "call"}, {"api_name": "IPython.terminal", "line_number": 101, "usage_type": "attribute"}, {"api_name": "click.option", "line_number": 109, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 109, "usage_type": "call"}, {"api_name": "click.option", "line_number": 110, "usage_type": "call"}, {"api_name": "click.option", "line_number": 111, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 117, "usage_type": "call"}, {"api_name": "coordinator.Coordinator", "line_number": 117, "usage_type": "argument"}, {"api_name": "typing.Generic", "line_number": 118, "usage_type": "name"}, {"api_name": "command_base.Command", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 133, "usage_type": "name"}, {"api_name": "click.command", "line_number": 138, "usage_type": "call"}, {"api_name": "click.Command", "line_number": 137, "usage_type": "attribute"}, {"api_name": "coordinator.Coordinator", "line_number": 147, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.caution", "line_number": 150, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", 
"line_number": 150, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 157, "usage_type": "name"}, {"api_name": "error.MicraQuit", "line_number": 164, "usage_type": "call"}, {"api_name": "click.command", "line_number": 162, "usage_type": "call"}, {"api_name": "click.Command", "line_number": 161, "usage_type": "attribute"}, {"api_name": "typing.Generic", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 174, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 174, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 181, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 181, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 188, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 198, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 199, "usage_type": "name"}, {"api_name": "coordinator.Coordinator", "line_number": 210, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.info", "line_number": 213, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", "line_number": 213, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 220, "usage_type": "name"}, {"api_name": "click.command", "line_number": 225, "usage_type": "call"}, {"api_name": "click.Command", "line_number": 224, "usage_type": "attribute"}, {"api_name": "coordinator.Coordinator", "line_number": 231, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.info", "line_number": 234, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", "line_number": 234, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 233, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 241, "usage_type": "name"}, {"api_name": "moda.style.Styleds", "line_number": 255, "usage_type": "call"}, {"api_name": "moda.style.CustomStyled", "line_number": 256, "usage_type": "call"}, {"api_name": "moda.style.Format", "line_number": 256, "usage_type": "call"}, {"api_name": "moda.style.CustomStyled", "line_number": 257, "usage_type": "call"}, {"api_name": "moda.style.Format", "line_number": 257, "usage_type": "call"}, {"api_name": "click.command", "line_number": 246, "usage_type": "call"}, {"api_name": "click.option", "line_number": 247, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 247, "usage_type": "call"}, {"api_name": "click.Command", "line_number": 245, "usage_type": "attribute"}, {"api_name": "coordinator.Coordinator", "line_number": 262, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.info", "line_number": 265, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", "line_number": 265, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 264, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 272, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 281, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 282, "usage_type": "attribute"}, {"api_name": "typing.Set", "line_number": 283, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 283, "usage_type": "name"}, {"api_name": "re.Pattern", "line_number": 283, "usage_type": "attribute"}, {"api_name": "structure.micra_structures.key", 
"line_number": 291, "usage_type": "attribute"}, {"api_name": "structure.micra_structures", "line_number": 291, "usage_type": "name"}, {"api_name": "structure.Structure.from_dict", "line_number": 298, "usage_type": "call"}, {"api_name": "structure.Structure", "line_number": 298, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 298, "usage_type": "call"}, {"api_name": "structure.micra_structures.key", "line_number": 299, "usage_type": "attribute"}, {"api_name": "structure.micra_structures", "line_number": 299, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 301, "usage_type": "attribute"}, {"api_name": "click.command", "line_number": 277, "usage_type": "call"}, {"api_name": "click.option", "line_number": 278, "usage_type": "call"}, {"api_name": "click.option", "line_number": 279, "usage_type": "call"}, {"api_name": "click.Command", "line_number": 276, "usage_type": "attribute"}, {"api_name": "coordinator.Coordinator", "line_number": 307, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.caution", "line_number": 310, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", "line_number": 310, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 309, "usage_type": "name"}, {"api_name": "click.group", "line_number": 318, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 319, "usage_type": "call"}, {"api_name": "click.pass_context", "line_number": 320, "usage_type": "attribute"}, {"api_name": "click.argument", "line_number": 325, "usage_type": "call"}, {"api_name": "click.pass_obj", "line_number": 326, "usage_type": "attribute"}, {"api_name": "click.pass_obj", "line_number": 331, "usage_type": "attribute"}, {"api_name": "os.kill", "line_number": 339, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 339, "usage_type": "attribute"}, {"api_name": "click.pass_obj", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.kill", "line_number": 347, "usage_type": "call"}, {"api_name": "signal.SIGKILL", "line_number": 347, "usage_type": "attribute"}, {"api_name": "click.pass_obj", "line_number": 344, "usage_type": "attribute"}, {"api_name": "click.Command", "line_number": 317, "usage_type": "attribute"}, {"api_name": "coordinator.Coordinator", "line_number": 357, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.caution", "line_number": 360, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", "line_number": 360, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 359, "usage_type": "name"}, {"api_name": "click.group", "line_number": 368, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 369, "usage_type": "call"}, {"api_name": "click.pass_context", "line_number": 370, "usage_type": "attribute"}, {"api_name": "click.argument", "line_number": 375, "usage_type": "call"}, {"api_name": "click.pass_obj", "line_number": 376, "usage_type": "attribute"}, {"api_name": "click.pass_obj", "line_number": 381, "usage_type": "attribute"}, {"api_name": "click.Command", "line_number": 367, "usage_type": "attribute"}, {"api_name": "coordinator.Coordinator", "line_number": 387, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.caution", "line_number": 390, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", "line_number": 390, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 389, "usage_type": "name"}, {"api_name": 
"click.group", "line_number": 398, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 405, "usage_type": "name"}, {"api_name": "click.argument", "line_number": 403, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 404, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 409, "usage_type": "call"}, {"api_name": "click.Command", "line_number": 397, "usage_type": "attribute"}, {"api_name": "coordinator.Coordinator", "line_number": 424, "usage_type": "name"}, {"api_name": "command_base.CommandCategory.caution", "line_number": 435, "usage_type": "attribute"}, {"api_name": "command_base.CommandCategory", "line_number": 435, "usage_type": "name"}, {"api_name": "command_base.CommandCategory", "line_number": 434, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 446, "usage_type": "name"}, {"api_name": "click.command", "line_number": 443, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 444, "usage_type": "call"}, {"api_name": "click.pass_context", "line_number": 445, "usage_type": "attribute"}, {"api_name": "click.Command", "line_number": 442, "usage_type": "attribute"}]} +{"seq_id": "613793137", "text": "from application import app, db\nfrom application.lists.models import Watchlist\nfrom application.lists.forms import ListForm\nfrom application.lists.forms import EditForm\n\nfrom application.content.models import Content\n\nfrom flask import redirect, render_template, request, url_for, flash\nfrom flask_login import login_required, current_user\n\n@app.route(\"/lists\", methods=[\"GET\"])\n@login_required\ndef lists_index():\n lists = Watchlist.query.filter_by(account_id=current_user.id).all()\n return render_template(\"lists/list.html\", lists = lists, total_length=Content.total_length_for_a_user(current_user.id))\n\n\n@app.route(\"/lists/new/\")\n@login_required\ndef lists_form():\n return render_template(\"lists/new.html\", form = ListForm())\n\n@app.route(\"/lists/\", methods=[\"POST\"])\n@login_required\ndef lists_create():\n form = ListForm(request.form)\n\n if not form.validate():\n return render_template(\"lists/new.html\", form = form)\n\n l = Watchlist(form.name.data)\n l.account_id = current_user.id\n\n db.session().add(l)\n db.session().commit()\n\n return redirect(url_for(\"lists_index\"))\n \n@app.route(\"/lists//edit\", methods=[\"GET\", \"POST\"])\n@login_required\ndef lists_update(list_id):\n\n l = Watchlist.query.get(list_id)\n acc = l.account_id\n if not acc == current_user.id:\n flash(\"Access denied. 
Please select a watchlist.\", category=\"warning\")\n return redirect(url_for(\"lists_index\"))\n\n \n if request.method == \"GET\":\n return render_template(\"lists/edit.html\", form = EditForm(), list_id=list_id, name = l.name)\n\n if request.method == \"POST\":\n form = EditForm(request.form)\n\n if not form.validate():\n return render_template(\"lists/edit.html\", form = form)\n\n l.name = form.name.data\n\n db.session().add(l)\n db.session().commit()\n\n return redirect(url_for(\"content_for_list\", list_id=list_id))\n\n\n@app.route(\"/lists/delete/<list_id>\", methods=[\"POST\"])\n@login_required\ndef lists_delete(list_id):\n\n l = Watchlist.query.get(list_id)\n watchlist_content = l.content\n\n for c in watchlist_content:\n db.session().delete(c)\n db.session().commit()\n \n db.session().delete(l)\n db.session().commit()\n\n return redirect(url_for(\"lists_index\"))\n", "sub_path": "application/lists/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "application.lists.models.Watchlist.query.filter_by", "line_number": 14, "usage_type": "call"}, {"api_name": "application.lists.models.Watchlist.query", "line_number": 14, "usage_type": "attribute"}, {"api_name": "application.lists.models.Watchlist", "line_number": 14, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "application.content.models.Content.total_length_for_a_user", "line_number": 15, "usage_type": "call"}, {"api_name": "application.content.models.Content", "line_number": 15, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 15, "usage_type": "name"}, {"api_name": "application.app.route", "line_number": 11, "usage_type": "call"}, {"api_name": "application.app", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "application.lists.forms.ListForm", "line_number": 21, "usage_type": "call"}, {"api_name": "application.app.route", "line_number": 18, "usage_type": "call"}, {"api_name": "application.app", "line_number": 18, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 19, "usage_type": "name"}, {"api_name": "application.lists.forms.ListForm", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "application.lists.models.Watchlist", "line_number": 31, "usage_type": "call"}, {"api_name": "flask_login.current_user.id", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 32, "usage_type": "name"}, {"api_name": "application.db.session", "line_number": 34, "usage_type": "call"}, {"api_name": "application.db", "line_number": 34, "usage_type": "name"}, {"api_name": "application.db.session", "line_number": 35, "usage_type": "call"}, {"api_name": "application.db", 
"line_number": 35, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "application.app.route", "line_number": 23, "usage_type": "call"}, {"api_name": "application.app", "line_number": 23, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 24, "usage_type": "name"}, {"api_name": "application.lists.models.Watchlist.query.get", "line_number": 43, "usage_type": "call"}, {"api_name": "application.lists.models.Watchlist.query", "line_number": 43, "usage_type": "attribute"}, {"api_name": "application.lists.models.Watchlist", "line_number": 43, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 51, "usage_type": "call"}, {"api_name": "application.lists.forms.EditForm", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "application.lists.forms.EditForm", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 57, "usage_type": "call"}, {"api_name": "application.db.session", "line_number": 61, "usage_type": "call"}, {"api_name": "application.db", "line_number": 61, "usage_type": "name"}, {"api_name": "application.db.session", "line_number": 62, "usage_type": "call"}, {"api_name": "application.db", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 64, "usage_type": "call"}, {"api_name": "application.app.route", "line_number": 39, "usage_type": "call"}, {"api_name": "application.app", "line_number": 39, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 40, "usage_type": "name"}, {"api_name": "application.lists.models.Watchlist.query.get", "line_number": 71, "usage_type": "call"}, {"api_name": "application.lists.models.Watchlist.query", "line_number": 71, "usage_type": "attribute"}, {"api_name": "application.lists.models.Watchlist", "line_number": 71, "usage_type": "name"}, {"api_name": "application.db.session", "line_number": 75, "usage_type": "call"}, {"api_name": "application.db", "line_number": 75, "usage_type": "name"}, {"api_name": "application.db.session", "line_number": 76, "usage_type": "call"}, {"api_name": "application.db", "line_number": 76, "usage_type": "name"}, {"api_name": "application.db.session", "line_number": 78, "usage_type": "call"}, {"api_name": "application.db", "line_number": 78, "usage_type": "name"}, {"api_name": "application.db.session", "line_number": 79, "usage_type": "call"}, {"api_name": "application.db", "line_number": 79, "usage_type": "name"}, {"api_name": 
"flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 81, "usage_type": "call"}, {"api_name": "application.app.route", "line_number": 67, "usage_type": "call"}, {"api_name": "application.app", "line_number": 67, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "650703525", "text": "from torch import Tensor\nfrom torch.nn import Module\n\nfrom .bs.black_scholes import BlackScholes\nfrom .clamp import Clamp\n\n\nclass WhalleyWilmott(Module):\n \"\"\"Initialize Whalley-Wilmott's hedging strategy of a derivative.\n\n The `forward` method returns the next hedge ratio.\n\n This is the optimal hedging strategy for asymptotically small transaction cost.\n\n Args:\n derivative (:class:`pfhedge.instruments.Derivative`): Derivative to hedge.\n a (float, default=1.0): Risk aversion parameter in exponential utility.\n\n Shape:\n - Input: :math:`(N, *, H_{\\\\text{in}})`. Here, :math:`*` means any number of\n additional dimensions and `H_in` is the number of input features.\n See `inputs()` for the names of input features.\n - Output: :math:`(N, *, 1)`. The hedge ratio at the next time step.\n\n Examples:\n\n An example for :class:`pfhedge.instruments.EuropeanOption`.\n\n >>> import torch\n >>> from pfhedge.nn import WhalleyWilmott\n >>> from pfhedge.instruments import BrownianStock\n >>> from pfhedge.instruments import EuropeanOption\n >>> derivative = EuropeanOption(BrownianStock(cost=1e-5))\n >>>\n >>> m = WhalleyWilmott(derivative)\n >>> m.inputs()\n ['log_moneyness', 'expiry_time', 'volatility', 'prev_hedge']\n >>> input = torch.tensor([\n ... [-0.05, 0.1, 0.2, 0.5],\n ... [-0.01, 0.1, 0.2, 0.5],\n ... [ 0.00, 0.1, 0.2, 0.5],\n ... [ 0.01, 0.1, 0.2, 0.5],\n ... [ 0.05, 0.1, 0.2, 0.5]])\n >>> m(input)\n tensor([[0.2946],\n [0.5000],\n [0.5000],\n [0.5000],\n [0.7284]])\n\n An example for :class:`pfhedge.instruments.EuropeanOption` without cost.\n\n >>> derivative = EuropeanOption(BrownianStock())\n >>> m = WhalleyWilmott(derivative)\n >>> m.inputs()\n ['log_moneyness', 'expiry_time', 'volatility', 'prev_hedge']\n >>> input = torch.tensor([\n ... [-0.05, 0.1, 0.2, 0.5],\n ... [-0.01, 0.1, 0.2, 0.5],\n ... [ 0.00, 0.1, 0.2, 0.5],\n ... [ 0.01, 0.1, 0.2, 0.5],\n ... [ 0.05, 0.1, 0.2, 0.5]])\n >>> m(input)\n tensor([[0.2239],\n [0.4497],\n [0.5126],\n [0.5752],\n [0.7945]])\n\n References:\n - Whalley, A.E. and Wilmott, P., An asymptotic analysis of an optimal hedging\n model for option pricing with transaction costs. 
Mathematical Finance,\n 1997, 7, 307–324.\n \"\"\"\n\n def __init__(self, derivative, a: float = 1.0):\n super().__init__()\n self.derivative = derivative\n self.a = a\n\n self.bs = BlackScholes(derivative)\n self.clamp = Clamp()\n\n def inputs(self) -> list:\n \"\"\"Returns the names of input features.\n\n Returns:\n list\n \"\"\"\n return self.bs.inputs() + [\"prev_hedge\"]\n\n def extra_repr(self):\n return f\"a={self.a}\" if self.a != 1 else \"\"\n\n def forward(self, input: Tensor) -> Tensor:\n prev_hedge = input[..., [-1]]\n\n delta = self.bs(input[..., :-1])\n width = self.width(input[..., :-1])\n min = delta - width\n max = delta + width\n\n return self.clamp(prev_hedge, min=min, max=max)\n\n def width(self, input: Tensor) -> Tensor:\n \"\"\"Returns half-width of the no-transaction band.\n\n Args:\n input (Tensor): The input tensor.\n\n Shape:\n - Input: :math:`(N, *, H_{\\\\text{in}} - 1)`\n - Output: :math:`(N, *, 1)`\n\n Returns:\n torch.Tensor\n \"\"\"\n cost = self.derivative.underlier.cost\n\n spot = self.derivative.strike * input[..., [0]].exp()\n gamma = self.bs.gamma(*(input[..., [i]] for i in range(input.size(-1))))\n width = (cost * (3 / 2) * (gamma ** 2) * spot / self.a) ** (1 / 3)\n\n return width\n", "sub_path": "pfhedge/nn/modules/ww.py", "file_name": "ww.py", "file_ext": "py", "file_size_in_byte": 3979, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "name"}, {"api_name": "bs.black_scholes.BlackScholes", "line_number": 81, "usage_type": "call"}, {"api_name": "clamp.Clamp", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "344625321", "text": "from setuptools import setup\nimport os, re\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\ndef get_version() -> str:\n \"\"\"Get __version__ from __init__.py file.\"\"\"\n version_file = os.path.join(os.path.dirname(__file__), \"kivytransitions\", \"__init__.py\")\n version_file_data = open(version_file, \"rt\", encoding=\"utf-8\").read()\n version_regex = r\"(?<=^__version__ = ['\\\"])[^'\\\"]+(?=['\\\"]$)\"\n try:\n version = re.findall(version_regex, version_file_data, re.M)[0]\n return version\n except IndexError:\n raise ValueError(f\"Unable to find version string in {version_file}.\")\n\n\nsetup(\n name=\"kivytransitions\",\n version=get_version(),\n packages=[\"kivytransitions\"],\n package_data={\"kivytransitions\": [\"*.py\", \"transitions/*\", \"transitions/extra/*\"],},\n # metadata to display on PyPI\n author=\"Shashi Ranjan\",\n author_email=\"shashiranjankv@gmail.com\",\n description=\"A variety of custom screen transitions for kivy\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"transitions shaders kivy-application kivy python\",\n url=\"https://github.com/shashi278/KivyShaderTransitions\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: Android\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: OS Independent\"\n ],\n install_requires=[\"kivy\"],\n\n python_requires=\">=3.6\",\n)", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 14, "usage_type": "call"}, {"api_name": "re.M", "line_number": 14, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "505493366", "text": "from django.conf import settings\nfrom templated_email import send_templated_mail\n\nfrom ..celeryconf import app\nfrom ..core.emails import get_email_base_context\nfrom .models import GeneralInquiry, DesignerInquiry\n# from ..core.utils import build_absolute_uri\n\nDEV_RECIPIENT = \"adrienshen.dev@gmail.com\"\n\ndef collect_data_for_general_inquiry(pk):\n inquiry = GeneralInquiry.objects.get(pk=pk)\n email_context = get_email_base_context()\n email_context[\"inquiry\"] = inquiry\n return {\n \"template_name\": \"contact/inquiry_form\",\n \"from_email\": settings.DEFAULT_FROM_EMAIL,\n \"context\": email_context,\n \"recipient_list\": [DEV_RECIPIENT],\n }\n\n\ndef collect_data_for_designer_inquiry(pk):\n inquiry = DesignerInquiry.objects.get(pk=pk)\n context = get_email_base_context()\n context[\"inquiry\"] = inquiry\n return {\n \"template_name\": \"contact/designer_inquiry_form\",\n \"from_email\": settings.DEFAULT_FROM_EMAIL,\n \"context\": context,\n \"recipient_list\": [DEV_RECIPIENT],\n }\n\n\n@app.task\ndef send_general_inquiry_email(pk):\n email_data = collect_data_for_general_inquiry(pk)\n send_templated_mail(**email_data)\n\n\n@app.task\ndef send_designer_inquiry_email(pk):\n email_data = collect_data_for_designer_inquiry(pk)\n\n # breakpoint()\n send_templated_mail(**email_data)\n", "sub_path": "saleor/contact/emails.py", "file_name": "emails.py", "file_ext": "py", "file_size_in_byte": 1340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "models.GeneralInquiry.objects.get", "line_number": 12, "usage_type": "call"}, {"api_name": "models.GeneralInquiry.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.GeneralInquiry", "line_number": 12, "usage_type": "name"}, {"api_name": "core.emails.get_email_base_context", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "models.DesignerInquiry.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "models.DesignerInquiry.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.DesignerInquiry", "line_number": 24, "usage_type": "name"}, {"api_name": "core.emails.get_email_base_context", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 29, "usage_type": "name"}, {"api_name": "templated_email.send_templated_mail", "line_number": 38, "usage_type": "call"}, {"api_name": "celeryconf.app.task", "line_number": 35, "usage_type": "attribute"}, {"api_name": "celeryconf.app", "line_number": 35, "usage_type": "name"}, {"api_name": "templated_email.send_templated_mail", "line_number": 46, "usage_type": "call"}, {"api_name": "celeryconf.app.task", "line_number": 41, "usage_type": "attribute"}, 
{"api_name": "celeryconf.app", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "59591568", "text": "from prettytable import PrettyTable\r\n\r\ndef getFile():\r\n global text\r\n text = []\r\n with open(\"lab3.txt\", \"r\") as file:\r\n lines = file.readlines()\r\n for i in lines:\r\n text.append(i.replace('\\n', ''))\r\n for i in range(len(text)):\r\n text[i] = text[i].split(\" \")\r\n\r\ndef List_Cand():\r\n global list_of_candidates\r\n list_of_candidates = []\r\n for i in range(1, len(text[0])):\r\n list_of_candidates.append(text[0][i])\r\n list_of_candidates.sort()\r\n\r\n\r\ndef Condorcet():\r\n global list_of_results_condorce\r\n list_of_results_condorce = []\r\n global result_condorce\r\n result_condorce = [0, 0]\r\n\r\n for i in list_of_candidates:\r\n compare = list_of_candidates.copy()\r\n compare.remove(i)\r\n amount_general = []\r\n\r\n for k in compare:\r\n amount = 0\r\n for z in range(len(text)):\r\n if text[z].index(i) < text[z].index(k):\r\n amount += int(text[z][0])\r\n amount_general.append(amount)\r\n amount_general.sort()\r\n if amount_general[-1] > result_condorce[1]:\r\n result_condorce = [i, amount_general[-1]]\r\n list_of_results_condorce.append([i, amount_general[-1]])\r\n\r\n\r\ndef Borde():\r\n number_of_candidates = len(text[1])\r\n global list_of_results_borde\r\n list_of_results_borde = []\r\n global result_borde\r\n result_borde = [0, 0]\r\n\r\n for k in list_of_candidates:\r\n amount = 0\r\n for i in range(len(text)):\r\n amount += int(text[i][0]) * ((number_of_candidates-1)-text[i].index(k))\r\n\r\n if amount > result_borde[1]:\r\n result_borde = [k, amount]\r\n list_of_results_borde.append([k, amount])\r\n\r\n\r\ngetFile()\r\nList_Cand()\r\nCondorcet()\r\nBorde()\r\n\r\ntable = PrettyTable(['Num of voters', 'A', 'B', 'C', \"Results\"])\r\nfor i in range(len(text)):\r\n table.add_row([text[i][0], text[i].index('A'), text[i].index('B'), text[i].index('C'), \"\"])\r\n\r\ntable.add_row(['Condorce', list_of_results_condorce[0][1], list_of_results_condorce[1][1], list_of_results_condorce[2][1], result_condorce[0]])\r\ntable.add_row(['Borde', list_of_results_borde[0][1], list_of_results_borde[1][1], list_of_results_borde[2][1], result_borde[0]])\r\nprint(table)\r\n", "sub_path": "lab3.py", "file_name": "lab3.py", "file_ext": "py", "file_size_in_byte": 2252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "prettytable.PrettyTable", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "36960874", "text": "# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api, exceptions, _\nfrom datetime import datetime, timedelta\n\n\nPROGRESS_INFO = [('draft', 'Draft'), ('verified', 'Verified')]\nAVAIL_PROGRESS = [('full_day', 'Full Day'), ('half_day', 'Half Day'), ('absent', 'Absent')]\nDAY_PROGRESS = [('holiday', 'Holiday'), ('working_day', 'Working Day')]\n\n\n# Employee Attendance\nclass EmployeeAttendance(models.Model):\n _name = \"employee.attendance\"\n\n shift_id = fields.Many2one(comodel_name=\"time.shift\", string=\"Shift\", readonly=True)\n person_id = fields.Many2one(comodel_name=\"arc.person\", string=\"Employee\", readonly=True)\n attendance_id = fields.Many2one(comodel_name=\"daily.attendance\", string=\"Attendance\", readonly=True)\n expected_from_time = fields.Datetime(string=\"Expected From Time\", readonly=True)\n actual_from_time = fields.Datetime(string=\"Actual From Time\", readonly=True)\n expected_till_time = fields.Datetime(string=\"Expected Till Time\", 
readonly=True)\n actual_till_time = fields.Datetime(string=\"Actual Till Time\", readonly=True)\n expected_hours = fields.Float(string=\"Expected Hours\", default=0, readonly=True)\n actual_hours = fields.Float(string=\"Actual Hours\", default=0, readonly=True)\n permission_hours = fields.Float(string=\"Permission Hours\", default=0, readonly=True)\n on_duty_hours = fields.Float(string=\"On Duty Hours\", default=0, readonly=True)\n day_progress = fields.Selection(DAY_PROGRESS, string='Day Status', readonly=True)\n availability_progress = fields.Selection(AVAIL_PROGRESS, string='Availability Status')\n progress = fields.Selection(selection=PROGRESS_INFO, string='Progress', related='attendance_id.progress')\n\n @api.multi\n def update_hours(self):\n if self.expected_from_time and self.expected_till_time:\n expected_from_time = datetime.strptime(self.expected_from_time, \"%Y-%m-%d %H:%M:%S\")\n expected_till_time = datetime.strptime(self.expected_till_time, \"%Y-%m-%d %H:%M:%S\")\n self.expected_hours = (expected_till_time - expected_from_time).total_seconds()/(60 * 60)\n\n if self.actual_from_time and self.actual_till_time:\n actual_from_time = datetime.strptime(self.actual_from_time, \"%Y-%m-%d %H:%M:%S\")\n actual_till_time = datetime.strptime(self.actual_till_time, \"%Y-%m-%d %H:%M:%S\")\n self.actual_hours = (actual_till_time - actual_from_time).total_seconds() / (60 * 60)\n\n @api.multi\n def trigger_get_availability_progress(self):\n config = self.env[\"time.config\"].search([(\"company_id\", \"=\", self.env.user.company_id.id)])\n full_day = config.full_day\n half_day = config.half_day\n total_hours = self.actual_hours + self.permission_hours + self.on_duty_hours\n\n if total_hours >= full_day:\n self.availability_progress = \"full_day\"\n elif total_hours >= half_day:\n self.availability_progress = \"half_day\"\n else:\n self.availability_progress = \"absent\"\n\n _sql_constraints = [('unique_attendance_detail',\n 'unique (attendance_id, person_id)',\n 'Error! 
Employee should not be repeated')]\n", "sub_path": "models/time_management/employee_attendance.py", "file_name": "employee_attendance.py", "file_ext": "py", "file_size_in_byte": 3183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "odoo.models.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 13, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 16, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 16, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 17, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 17, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 18, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime", "line_number": 19, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime", "line_number": 20, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 20, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime", "line_number": 21, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 21, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime", "line_number": 22, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 24, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 25, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 26, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 27, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 27, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 28, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 28, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 29, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 31, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 31, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 43, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "585727233", "text": "# execfile('word_transformer.py'); \r\n\r\nfrom nltk.corpus import 
wordnet \r\nimport nltk\r\nfrom collections import defaultdict\r\n\r\nclass word_transformer: \r\n\tdef __init__(self): \r\n\t\tself.morphy_count=[];\r\n\t\r\n\tdef load_brown(self):\r\n\t\tfrom nltk.corpus import brown\r\n\t\tself.set_sents(brown.tagged_sents());\r\n\t\r\n\tdef set_sents(self, sents):\r\n\t\tself.sents=sents; \r\n\t\ttags_count=nltk.FreqDist([(w.lower(),t) for sent in sents for w,t in sent]); \r\n\t\ttype_map=defaultdict(str, {'V':wordnet.VERB, 'D':wordnet.VERB, 'B':wordnet.VERB, 'N':wordnet.NOUN, 'J':wordnet.ADJ, 'R':wordnet.ADV});\r\n\t\tmorphy_count=[(wordnet.morphy(k, (type_map[t[:1]] or wordnet.VERB) ),k,t,tags_count[(k,t)]) for k,t in tags_count.keys()];\r\n\t\tself.morphy_count=[];\r\n\t\tfor (k0,k,t,c) in morphy_count:\r\n\t\t\tif t[:3] in ['JJR', 'JJT']: \r\n\t\t\t\tself.morphy_count.append((self.adj_morphy(k),k,t,c));\r\n\t\t\telif t[:2] in ['BE', 'DO']: \r\n\t\t\t\tself.morphy_count.append((k0,k,'VB'+t[2:],c));\r\n\t\t\t#elif k in ['are', 'is', 'was', 'were']: \r\n\t\t\t#\tself.morphy_count.append(('be',k,'VB'+t[2:],c));\r\n\t\t\telse:\r\n\t\t\t\tself.morphy_count.append((k0,k,t,c));\r\n\t\t\t\t\r\n\tdef get_word(self, word, tag=''): \r\n\t\tspecial={'was':'be', 'were':'be','is':'be','are':'be'}; \r\n\t\tif word.lower() in special.keys(): \r\n\t\t\tw0=special[word.lower()];\r\n\t\t\tspecial2={ ('be', ''): 'be', ('be', 'VB'): 'be', \r\n\t\t\t\t\t('be','VBZ'):'is', ('be','VBZS'):'is', ('be','VBZP'):'are', \r\n\t\t\t\t\t('be','VBD'):'was', ('be','VBDS'):'was', ('be','VBDP'):'were', \r\n\t\t\t\t\t('be','VBN'):'been', ('be','VBNS'):'been', ('be','VBNP'):'been', \r\n\t\t\t\t\t('be','VBG'):'being', ('be','VBGS'):'being', ('be','VBGP'):'being', \r\n\t\t\t\t};\r\n\t\t\tif (w0, tag) in special2.keys(): \r\n\t\t\t\treturn special2[(w0, tag)]; \r\n\t\telse:\r\n\t\t\ttype_map=defaultdict(str, {'V':wordnet.VERB, 'D':wordnet.VERB, 'B':wordnet.VERB, 'N':wordnet.NOUN, 'J':wordnet.ADJ, 'R':wordnet.ADV});\r\n\t\t\tw0=wordnet.morphy(word.lower(), (type_map[tag[:1]] or wordnet.VERB));\r\n\t\tif tag[:1]=='J': w0=self.adj_morphy(word);\r\n\t\tif not tag: return w0; \r\n\t\tTL=len(tag); \r\n\t\tw1='';\r\n\t\tword_forms=[(k0,k,t,c) for k0,k,t,c in self.morphy_count if k0==w0];\r\n\t\tif word_forms: \r\n\t\t\tforms=[(k0,k,t,c) for k0,k,t,c in word_forms if not tag or t[:TL]==tag]; \r\n\t\t\tif forms:\r\n\t\t\t\twords=sorted(forms, key=lambda k: k[3], reverse=True);\r\n\t\t\t\tw1=words[0][1]; \r\n\t\tif not w1: \r\n\t\t\tif tag[0]=='V': \r\n\t\t\t\tw1=self.get_verb(word, tag); \r\n\t\t\telif tag[0]=='N':\r\n\t\t\t\tw1=self.get_noun(word, tag); \r\n\t\t\telif tag[0]=='J':\r\n\t\t\t\tw1=self.get_adj(word, tag); \r\n\t\tif not w1: w1=w0; \r\n\t\treturn w1; \r\n\t\t\r\n\tdef get_noun(self, word, tag=''): \r\n\t\ttype=tag[2:3];\r\n\t\ttype_map={'S': self.noun_NNS, '': self.noun_NN};\r\n\t\tif type in type_map.keys(): \r\n\t\t\treturn type_map[type](word);\r\n\t\treturn self.noun_NN(word);\t\t\r\n\t\r\n\tdef get_verb(self, word, tag=''): \r\n\t\ttype=tag[2:3];\r\n\t\ttype_map={'Z': self.verb_VBZ, 'G': self.verb_VBG, 'D': self.verb_VBD, 'N': self.verb_VBD, '': self.verb_VB};\r\n\t\tif type in type_map.keys(): \r\n\t\t\treturn type_map[type](word);\r\n\t\treturn self.verb_VB(word);\r\n\t\t\r\n\tdef get_adj(self, word, tag=''): \r\n\t\ttype=tag[2:3];\r\n\t\ttype_map={'R': self.adj_JJR, 'T': self.adj_JJT, '': self.adj_JJ};\r\n\t\tif type in type_map.keys(): \r\n\t\t\treturn type_map[type](word);\r\n\t\treturn self.adj_JJ(word);\r\n\r\n\tdef adj_morphy(self, word):\r\n\t\tw=word.lower(); \r\n\t\tif w.find('-')>0: \r\n\t\t\tws=w.split('-'); 
\r\n\t\t\treturn self.adj_morphy(ws[0]) + '-' + '-'.join(ws[1:]);\r\n\t\t#\r\n\t\tspecial={'better':'good', 'best':'good', 'worst':'bad', 'worse':'bad'};\r\n\t\tw0='';\r\n\t\tif w in special.keys(): \r\n\t\t\tw0=special[w]; \r\n\t\telse: \r\n\t\t\tif w[-4:]=='iest': w0=w[:-4]+'y'; \r\n\t\t\telif w[-3:]=='est': w0=w[:-3]; \r\n\t\t\telif w[-3:]=='ier': w0=w[:-3]+'y'; \r\n\t\t\telif w[-2:]=='er': w0=w[:-2]; \r\n\t\t\tif w0 and not wordnet.morphy(w0, wordnet.ADJ): w0=w;\r\n\t\tif not w0: w0=w; \r\n\t\treturn w0;\r\n\t\t\r\n\tdef adj_JJ(self, word):\r\n\t\tw=word.lower();\r\n\t\tif w.find('-')>0: \r\n\t\t\tws=w.split('-'); \r\n\t\t\treturn self.adj_JJ(ws[0]) + '-' + '-'.join(ws[1:]);\r\n\t\tw0=self.adj_morphy(w);\r\n\t\treturn w0;\r\n\t\t\r\n\tdef adj_JJR(self, word):\r\n\t\tw=word.lower();\r\n\t\tif w.find('-')>0: \r\n\t\t\tws=w.split('-'); \r\n\t\t\treturn self.adj_JJR(ws[0]) + '-' + '-'.join(ws[1:]);\r\n\t\tw0=self.adj_morphy(w);\r\n\t\tspecial={'good':'better', 'bad':'worse'};\r\n\t\tw1=''; \r\n\t\tif w0 in special.keys(): \r\n\t\t\tw1=special[w0]; \r\n\t\telif w0[-1:] == 'y': \r\n\t\t\tw1=w0[:-1]+'ier'; \r\n\t\telif w0[-1:]=='e': \r\n\t\t\tw1=w0+'r'; \r\n\t\telse: w1=w0+'er';\r\n\t\treturn w1; \r\n\t\t\r\n\tdef adj_JJT(self, word):\r\n\t\tw=word.lower();\r\n\t\tif w.find('-')>0: \r\n\t\t\tws=w.split('-'); \r\n\t\t\treturn self.adj_JJT(ws[0]) + '-' + '-'.join(ws[1:]);\r\n\t\tw0=self.adj_morphy(w);\r\n\t\tspecial={'good':'best', 'bad':'worst'};\r\n\t\tw1=''; \r\n\t\tif w0 in special.keys(): \r\n\t\t\tw1=special[w0]; \r\n\t\telif w0[-1:] == 'y': \r\n\t\t\tw1=w0[:-1]+'iest'; \r\n\t\telif w0[-1:]=='e': \r\n\t\t\tw1=w0+'st'; \r\n\t\telse: w1=w0+'est';\r\n\t\treturn w1; \r\n\t\t\r\n\t\t\r\n\tdef noun_NN(self, word):\r\n\t\tw0=wordnet.morphy(word.lower(), wordnet.NOUN);\r\n\t\treturn w0;\r\n\t\t\r\n\tdef noun_NNS(self, word):\r\n\t\tw0=wordnet.morphy(word.lower(), wordnet.NOUN);\r\n\t\tw1=''; \r\n\t\tif w0[-1] in 'sx': \r\n\t\t\tw1=w0+'es'; \r\n\t\telif w0[-2:] in ['sh','ch']: \r\n\t\t\tw1=w0+'es'; \r\n\t\telif w0[-1] == 'y': \r\n\t\t\tw1=w0[:-1]+'ies'; \r\n\t\telse: w1=w0+'s';\r\n\t\treturn w1; \r\n\t\t\r\n\tdef verb_VB(self, word):\r\n\t\tw0=wordnet.morphy(word, wordnet.VERB);\r\n\t\treturn w0;\r\n\t\t\r\n\tdef verb_VBG(self, word): \r\n\t\tw0=wordnet.morphy(word.lower(), wordnet.VERB);\r\n\t\tw1=''; \r\n\t\tif w0[-2:]=='ee': \r\n\t\t\tw1=w0+'ing'; \r\n\t\telif w0[-1:]=='e': \r\n\t\t\tw1=w0[:-1]+'ing'; \r\n\t\telse: w1=w0+'ing';\r\n\t\treturn w1; \r\n\t\t\t\r\n\tdef verb_VBD(self, word): \r\n\t\tw0=wordnet.morphy(word.lower(), wordnet.VERB);\r\n\t\tw1=''; \r\n\t\tif w0[-1:]=='e': \r\n\t\t\tw1=w0+'d'; \r\n\t\telif w0[-1:]=='y': \r\n\t\t\tw1=w0[:-1]+'ied'; \r\n\t\telse: w1=w0+'ed';\r\n\t\treturn w1; \r\n\t\t\r\n\tdef verb_VBZ(self, word): \r\n\t\tw0=wordnet.morphy(word, wordnet.VERB);\r\n\t\tw1=''; \r\n\t\tif w0[-1] in 'sx': \r\n\t\t\tw1=w0+'es'; \r\n\t\telif w0[-2:] in ['sh','ch']: \r\n\t\t\tw1=w0+'es'; \r\n\t\telif w0[-1] == 'y': \r\n\t\t\tw1=w0[:-1]+'ies'; \r\n\t\telse: w1=w0+'s';\r\n\t\treturn w1; \r\n\t\r\n\t\r\n\t\r\n\tdef find_words(self, word): \r\n\t\tw0=wordnet.morphy(word.lower());\r\n\t\tfound=[(k0,k,t,c) for k0,k,t,c in self.morphy_count if k0==w0]; \r\n\t\treturn found;\r\n\t\t\r\n\tdef find_tags(self, word='', tag=''): \r\n\t\tTL=len(tag); \r\n\t\tfound=[(k0,k,t,c) for k0,k,t,c in self.morphy_count if (not word or word==k) and (not tag or t[:TL]==tag)]; \r\n\t\ttags=defaultdict(int);\r\n\t\tfor (k0,k,t,c) in found: \r\n\t\t\ttags[t]+=c; \r\n\t\tprint_list(sorted(tags.items(), 
key=lambda k: k[0])); \r\n\t\treturn tags;\r\n\t\r\n\tdef find_sents(self, word, tag=''): \r\n\t\tTL=len(tag); \r\n\t\tw0=wordnet.morphy(word.lower());\r\n\t\tfound=defaultdict(list); \r\n\t\tfor sent in self.sents:\r\n\t\t\tfor w,t in sent: \r\n\t\t\t\tif wordnet.morphy(w.lower())==w0 and (not tag or t[:TL]==tag): \r\n\t\t\t\t\tfound[(w,t)].append(sent);\r\n\t\t\t\t\tprint(' -- ', T(sent), w, t); \r\n\t\treturn found;\t\t\t\r\n\t\r\n\t\r\n", "sub_path": "cgi-bin/word_transformer.py", "file_name": "word_transformer.py", "file_ext": "py", "file_size_in_byte": 6641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "nltk.corpus.brown.tagged_sents", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.corpus.brown", "line_number": 11, "usage_type": "name"}, {"api_name": "nltk.corpus.FreqDist", "line_number": 15, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 15, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 16, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 16, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.NOUN", "line_number": 16, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.ADJ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.ADV", "line_number": 16, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 17, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 17, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 42, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 42, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.NOUN", "line_number": 42, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.ADJ", "line_number": 42, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.ADV", "line_number": 42, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 43, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 43, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 43, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 100, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 100, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.ADJ", "line_number": 100, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 148, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 148, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.NOUN", "line_number": 148, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 152, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 152, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.NOUN", "line_number": 152, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 164, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 164, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 164, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 168, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 168, "usage_type": "name"}, {"api_name": 
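The word_transformer class above hinges on POS-aware lemmatization: a Brown-tag prefix is mapped to a WordNet part of speech before calling wordnet.morphy, falling back to VERB for unknown prefixes. A minimal, self-contained sketch of that normalization step (the example words are illustrative, not taken from the corpus):

# Sketch of the tag-prefix -> WordNet POS lemmatization used by word_transformer.
from collections import defaultdict
from nltk.corpus import wordnet

type_map = defaultdict(str, {'V': wordnet.VERB, 'N': wordnet.NOUN,
                             'J': wordnet.ADJ, 'R': wordnet.ADV})

def lemma(word, tag=''):
    # Unknown prefixes map to '' and fall back to VERB, as in get_word above;
    # morphy returns None for words it cannot analyze, so keep the input then.
    base = wordnet.morphy(word.lower(), type_map[tag[:1]] or wordnet.VERB)
    return base or word.lower()

print(lemma('denied', 'VBD'))  # -> 'deny'
print(lemma('cars', 'NNS'))    # -> 'car'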
"nltk.corpus.wordnet.VERB", "line_number": 168, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 178, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 178, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 178, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 188, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 188, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 188, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 202, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 202, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 217, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 217, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.morphy", "line_number": 221, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 221, "usage_type": "name"}]} +{"seq_id": "122545870", "text": "import pytest\nfrom collections import namedtuple\n\nProperties = namedtuple(\"Properties\", [\"parameter\", \"expected\"])\n\ndef get_test_properties():\n p1 = Properties(\"parameter1\", \"hello parameter 3\")\n p2 = Properties(\"parameter2\", \"hello parameter 4\")\n return [p1, p2]\n\n@pytest.mark.parametrize(\"properties\", get_test_properties())\ndef test_something_else(properties, create_configuration):\n conf = create_configuration(params=[properties.parameter])\n assert conf.var == properties.expected\n", "sub_path": "categorized/tests/category/test_something_else.py", "file_name": "test_something_else.py", "file_ext": "py", "file_size_in_byte": 501, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.namedtuple", "line_number": 4, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "416261111", "text": "from visual import *\nfrom lib.color_list import read_colors\n\ndef get_hex_colors():\n hex_colors = []\n for color in read_colors()[1]:\n hex_colors.append(color[0])\n return hex_colors\n\ndef hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n\ndef rgb_to_hex(rgb):\n return '#%02x%02x%02x' % rgb\n\ndef hex_to_pos(hexval):\n return hex_to_rgb(hexval)\n\ndef pos_to_color(pos):\n x,y,z = pos\n return (x/256., y/256., z/256.)\n\ndef named_color_spheres():\n scene.range = (256, 256, 256)\n scene.center = (128, 128, 128)\n\n hex_colors = get_hex_colors()\n for hex in hex_colors:\n pos = hex_to_pos(hex)\n color = pos_to_color(pos)\n sphere(pos=pos, radius=10, color=color)\n\n\ndef bounce_ball():\n floor = box(pos=(0,0,0), length=4, height=0.5, width=4, color=color.blue)\n ball = sphere(pos=(0,4,0), radius=1, color=color.red)\n ball.velocity = vector(0,-1,0)\n dt = 0.02\n\n while 1:\n rate(100)\n ball.pos = ball.pos + ball.velocity*dt\n if ball.y < ball.radius:\n ball.velocity.y = abs(ball.velocity.y)\n else:\n ball.velocity.y = ball.velocity.y - 9.8*dt\n\n\ndef cube_of_spheres():\n scene.range = (256, 256, 256)\n scene.center = (128, 128, 128)\n t = list(range(0, 256, 26))\n for x in t:\n for y in t:\n for z in t:\n pos = x, y, z\n color = (x/256., y/256., z/256.)\n sphere(pos=pos, 
radius=10, color=color)\n\n\ndef main():\n prompt = \"\\n1: cube of spheres\\n2: bouncing ball\\n3: named color spheres\\nz: exit\\n\\n>>\"\n option = input(prompt)\n\n if option == \"1\":\n cube_of_spheres()\n elif option == \"2\":\n bounce_ball()\n elif option == \"3\":\n named_color_spheres()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "thinkpython/ch17_visual.py", "file_name": "ch17_visual.py", "file_ext": "py", "file_size_in_byte": 1888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "lib.color_list.read_colors", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "119549028", "text": "import requests\nimport json\nfrom conftest import print_timing\nfrom fixtures import session\nfrom fixtures import base_url\nfrom conftest import saveRemoveDiagramCmd\nimport os\nfrom maxfreq import max_freq\nfrom conftest import print_in_shell\n\nclass TestLinkConfig:\n @max_freq(500/3600)\n @print_timing\n def test_create_change_link(self, base_url, session):\n HOSTNAME = os.environ.get('application_hostname')\n # Get user\n diagrams_response = session.get('/rest/dependency-map/1.0/user')\n assert diagrams_response.status_code == 200\n userKey = diagrams_response.json()[\"key\"]\n print_in_shell(\"User key: \" + userKey)\n\n # Create diagram\n payload = {\n 'name':\"E100\",\n 'layoutId':0,\n 'filterKey': 10000,\n 'boxColorFieldKey': \"priority\",\n 'groupedLayoutFieldKey': \"priority\",\n 'matrixLayoutHorizontalFieldKey': 'fixVersions',\n 'matrixLayoutVerticalFieldKey': 'fixVersions',\n 'showTypeIcons': True,\n 'parallelIssueBatches': 4,\n 'issuesPerRow': 5,\n 'secondaryIssues': 1,\n 'boxType': 0\n }\n diagrams_response = session.post('/rest/dependency-map/1.0/diagram',\n json=payload)\n\n newDiagram = diagrams_response.json()\n print('newDiagram=', newDiagram)\n diagramId = str(newDiagram[\"id\"])\n saveRemoveDiagramCmd(diagramId)\n\n #JIRA Get list of available link types\n diagrams_response = session.get('/rest/api/2/issueLinkType')\n issueLinkTypeId = diagrams_response.json()['issueLinkTypes'][0]['id']\n print_in_shell(\"issueLinkTypeId=\" + issueLinkTypeId)\n print_in_shell( diagrams_response.json() )\n\n # Get all link configs\n diagrams_response = session.get('/rest/dependency-map/1.0/linkConfig?diagramId=' + diagramId)\n print_in_shell(\"all link configs\")\n print_in_shell( diagrams_response.json() )\n\n # Create linkConfig\n payload = { 'diagramId': diagramId, 'linkKey': 10000, 'visible': True, 'dashType': 0, 'width': 0, 'colorPaletteEntryId': 20}\n\n diagrams_response = session.post('/rest/dependency-map/1.0/linkConfig?diagramId=' + diagramId,\n json=payload)\n\n newLinkConfig = diagrams_response.json()\n print('newLinkConfig=', newLinkConfig)\n assert(diagrams_response.status_code == 200)\n\n # Update linkConfig\n payload = {'diagramId': diagramId, 'linkKey': 10000, 'visible': True, 'dashType': 1, 'width': 2, 'colorPaletteEntryId': 39}\n\n diagrams_response = session.post('/rest/dependency-map/1.0/linkConfig',\n json=payload)\n assert(diagrams_response.status_code == 200)\n\n # Get all link configs\n diagrams_response = session.get('/rest/dependency-map/1.0/linkConfig?diagramId=' + diagramId)\n print_in_shell( diagrams_response.json() )\n", "sub_path": "app/pytests/test_create_link_config.py", "file_name": "test_create_link_config.py", "file_ext": "py", "file_size_in_byte": 2910, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": 
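The REST test above imports session and base_url fixtures from a fixtures module that is not part of this corpus. A hypothetical sketch of what such a pair of fixtures could look like; the PrefixedSession class, the credentials, and the hostname fallback are assumptions for illustration, not the project's actual code:

# Hypothetical conftest-style fixtures providing a base-URL-aware requests session.
import os
import pytest
import requests

@pytest.fixture
def base_url():
    # The tests read 'application_hostname' from the environment; default assumed.
    hostname = os.environ.get('application_hostname', 'localhost')
    return f'http://{hostname}'

@pytest.fixture
def session(base_url):
    class PrefixedSession(requests.Session):
        def request(self, method, url, *args, **kwargs):
            # Prefix every relative path so tests can call session.get('/rest/...').
            return super().request(method, base_url + url, *args, **kwargs)

    s = PrefixedSession()
    s.auth = ('admin', 'admin')  # assumed credentials
    yield s
    s.close()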
[{"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "fixtures.session.get", "line_number": 17, "usage_type": "call"}, {"api_name": "fixtures.session", "line_number": 17, "usage_type": "name"}, {"api_name": "conftest.print_in_shell", "line_number": 20, "usage_type": "call"}, {"api_name": "fixtures.session.post", "line_number": 37, "usage_type": "call"}, {"api_name": "fixtures.session", "line_number": 37, "usage_type": "name"}, {"api_name": "conftest.saveRemoveDiagramCmd", "line_number": 43, "usage_type": "call"}, {"api_name": "fixtures.session.get", "line_number": 46, "usage_type": "call"}, {"api_name": "fixtures.session", "line_number": 46, "usage_type": "name"}, {"api_name": "conftest.print_in_shell", "line_number": 48, "usage_type": "call"}, {"api_name": "conftest.print_in_shell", "line_number": 49, "usage_type": "call"}, {"api_name": "fixtures.session.get", "line_number": 52, "usage_type": "call"}, {"api_name": "fixtures.session", "line_number": 52, "usage_type": "name"}, {"api_name": "conftest.print_in_shell", "line_number": 53, "usage_type": "call"}, {"api_name": "conftest.print_in_shell", "line_number": 54, "usage_type": "call"}, {"api_name": "fixtures.session.post", "line_number": 59, "usage_type": "call"}, {"api_name": "fixtures.session", "line_number": 59, "usage_type": "name"}, {"api_name": "fixtures.session.post", "line_number": 69, "usage_type": "call"}, {"api_name": "fixtures.session", "line_number": 69, "usage_type": "name"}, {"api_name": "fixtures.session.get", "line_number": 74, "usage_type": "call"}, {"api_name": "fixtures.session", "line_number": 74, "usage_type": "name"}, {"api_name": "conftest.print_in_shell", "line_number": 75, "usage_type": "call"}, {"api_name": "maxfreq.max_freq", "line_number": 12, "usage_type": "call"}, {"api_name": "conftest.print_timing", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "311357920", "text": "import requests\n\nfrom digital_drops.film.merge import MERGE\nfrom digital_drops.media import MediaProvider\n\n\nclass Tmdb(MediaProvider):\n PROVIDER = 'TMDB'\n TYPE = 'FILM'\n URL = 'https://api.themoviedb.org/3/movie/upcoming'\n\n def __init__(self, conn):\n super().__init__(conn, self.PROVIDER, self.TYPE, MERGE)\n\n def _extract_from_api(self):\n params = '&'.join(['language=EN', 'region=US'])\n url = f'{self.URL}?{params}'\n page_num = 1\n\n res_json = self._load_staging(url, page_num)\n\n page_max = self._get_page_from_response(res_json)\n if page_max > 1:\n while page_num <= page_max:\n self._load_staging(url, page_num)\n page_num += 1\n\n @staticmethod\n def _get_page_from_response(json: dict) -> int:\n return int(json['total_pages'])\n\n def _load_staging(self, url: str, page: int) -> dict:\n req_no_key = f'{url}&page={page}&api_key='\n\n req = requests.get(req_no_key + self._api_key)\n res_json = req.json()\n\n if not res_json['results']:\n raise Exception(res_json['status_message'])\n\n sql = self.create_insert_query(url, req.text, page)\n\n self._dao.cursor.execute(sql)\n\n return res_json\n", "sub_path": "digital_drops/film/tmdb.py", "file_name": "tmdb.py", "file_ext": "py", "file_size_in_byte": 1257, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "digital_drops.media.MediaProvider", "line_number": 7, "usage_type": "name"}, {"api_name": "digital_drops.film.merge.MERGE", "line_number": 13, "usage_type": "argument"}, {"api_name": 
"requests.get", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "650387013", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom manufacturing_company.src.common.const import *\n\n\nclass PlotCollectiveClassification:\n def __init__(self, path, levels, random_baseline):\n self.path = path\n self.levels = levels\n self.random_baseline = random_baseline\n self.colors = ['red', 'blue', 'black', '#2FBF71', '#FAA916']\n self.markers = ['D', 's', '<', 'o', 'X']\n self.linestyles = ['--', '-', '--', '-', '--']\n self.x_labels = ['10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%']\n\n def plot(self):\n sns.set_style(\"darkgrid\")\n sns.set_context(\"notebook\")\n\n plt.figure(figsize=(10, 6))\n\n for month in MONTHS:\n df = pd.read_csv(self.path + '/log_months_' + str(month) + '.csv', sep=';')\n result_series = df.groupby('pct')['f1_score'].max()\n result_series = result_series.sort_index()\n result_series.to_csv(self.path + '/best_score_month_{0}.csv'.format(month), sep=';', index=True)\n plt.plot(self.x_labels, result_series.tolist(), label=str(month), linestyle=self.linestyles[month-1], marker=self.markers[month-1], color=self.colors[month-1])\n\n # random result is approximately 0.42 based on the results from random_classification.ipynb\n random_values = [self.random_baseline(self.levels)]*9\n plt.plot(self.x_labels, random_values, label='random', linestyle=':', marker='*', color='black')\n\n if self.levels == 2:\n levels_desc = 'two'\n else:\n levels_desc = 'three'\n\n plt.legend(loc='lower left', fontsize='small')\n plt.xlim(-1, 9)\n plt.ylim(0, 1.2)\n plt.xticks(np.arange(0,10))\n plt.xlabel('Known nodes')\n plt.ylabel('f1 score (macro)')\n plt.title('CollectiveClassification - ' + levels_desc + ' management levels')\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title='Minimum activity in months')\n plt.savefig(self.path + '/collective_classification_' + str(self.levels) + '.eps', bbox_inches='tight', format='eps')\n\n\n", "sub_path": "manufacturing_company/src/visualization/plot_collective_classification.py", "file_name": "plot_collective_classification.py", "file_ext": "py", "file_size_in_byte": 2160, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "seaborn.set_style", "line_number": 20, "usage_type": "call"}, {"api_name": "seaborn.set_context", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xticks", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "351395768", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom voicebot.stt import stt\nfrom voicebot.tts import tts\nimport time\nimport argparse\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser(description='Echo what you said')\n parser.add_argument('-l', '--lang', required=False, default='zh-tw', help='The language you use')\n args = parser.parse_args()\n quit_terms = ['不玩了.', '不玩了。', 'Stop.', 'Stop。']\n\n stt.init()\n tts.init()\n\n is_echoing = True\n while is_echoing:\n voice_data = stt.record_voice()\n text = stt.google_stt(voice_data, lang=args.lang)\n print('Transcript {}'.format(text))\n time.sleep(2)\n tts.say(text, lang=args.lang)\n is_echoing = text not in quit_terms\n\n stt.stop()\n tts.stop()\n", "sub_path": "scripts/echo_bot.py", "file_name": "echo_bot.py", "file_ext": "py", "file_size_in_byte": 785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "voicebot.stt.stt.init", "line_number": 14, "usage_type": "call"}, {"api_name": "voicebot.stt.stt", "line_number": 14, "usage_type": "name"}, {"api_name": "voicebot.tts.tts.init", "line_number": 15, "usage_type": "call"}, {"api_name": "voicebot.tts.tts", "line_number": 15, "usage_type": "name"}, {"api_name": "voicebot.stt.stt.record_voice", "line_number": 19, "usage_type": "call"}, {"api_name": "voicebot.stt.stt", "line_number": 19, "usage_type": "name"}, {"api_name": "voicebot.stt.stt.google_stt", "line_number": 20, "usage_type": "call"}, {"api_name": "voicebot.stt.stt", "line_number": 20, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "voicebot.tts.tts.say", "line_number": 23, "usage_type": "call"}, {"api_name": "voicebot.tts.tts", "line_number": 23, "usage_type": "name"}, {"api_name": "voicebot.stt.stt.stop", "line_number": 26, "usage_type": "call"}, {"api_name": "voicebot.stt.stt", "line_number": 26, "usage_type": "name"}, {"api_name": "voicebot.tts.tts.stop", "line_number": 27, "usage_type": "call"}, {"api_name": "voicebot.tts.tts", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "325884220", "text": "import unittest\nfrom app.test.base_test import BaseTestCase\nfrom app.main import db\nfrom app.main.model.author import Author\n\nfrom sqlalchemy.exc import IntegrityError\n\n\nclass TestUserModel(BaseTestCase): # BaseTestCase = test fixture\n\n def test_create_author(self):\n 
\"\"\" Test ob die Autor Entität in der Datenbank gespeichert werden kann. \"\"\"\n # given\n given_author = Author(\n name='test_name',\n last_name='test_last_name'\n )\n\n # when\n try:\n db.session.add(given_author)\n db.session.commit()\n except Exception:\n self.fail(\"Entität konnte nicht gespeichert werden!\")\n\n # then\n # should pass without exception\n\n def test_integrity_error(self):\n \"\"\" Testet die Constraints der Entität \"\"\"\n # given\n given_author = Author()\n\n # when\n with self.assertRaises(IntegrityError) as context:\n\n db.session.add(given_author)\n db.session.commit()\n\n # then\n self.assertTrue('NOT NULL constraint failed' in str(context.exception))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "app/test/model/test_author_model.py", "file_name": "test_author_model.py", "file_ext": "py", "file_size_in_byte": 1164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "app.test.base_test.BaseTestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "app.main.model.author.Author", "line_number": 14, "usage_type": "call"}, {"api_name": "app.main.db.session.add", "line_number": 21, "usage_type": "call"}, {"api_name": "app.main.db.session", "line_number": 21, "usage_type": "attribute"}, {"api_name": "app.main.db", "line_number": 21, "usage_type": "name"}, {"api_name": "app.main.db.session.commit", "line_number": 22, "usage_type": "call"}, {"api_name": "app.main.db.session", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.main.db", "line_number": 22, "usage_type": "name"}, {"api_name": "app.main.model.author.Author", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 35, "usage_type": "argument"}, {"api_name": "app.main.db.session.add", "line_number": 37, "usage_type": "call"}, {"api_name": "app.main.db.session", "line_number": 37, "usage_type": "attribute"}, {"api_name": "app.main.db", "line_number": 37, "usage_type": "name"}, {"api_name": "app.main.db.session.commit", "line_number": 38, "usage_type": "call"}, {"api_name": "app.main.db.session", "line_number": 38, "usage_type": "attribute"}, {"api_name": "app.main.db", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "465422286", "text": "# from flair_bot import FlairBot as FlairBot \n# from bot_scripts.entry import doBotThings\nfrom flask import Flask, render_template\nfrom flask_basicauth import BasicAuth\n\n# set the project root directory as the static folder, you can set others.\napp = Flask(__name__)\n\napp.config['BASIC_AUTH_USERNAME'] = 'john'\napp.config['BASIC_AUTH_PASSWORD'] = 'matrix'\n\nbasic_auth = BasicAuth(app)\n\n@app.route(\"/\")\ndef index():\n # 750 = Number of possible flairs + 1\n listOfFlairNums = [format(x, \"04d\") for x in range(1, 750)]\n return render_template(\"index.html\", flairlist=listOfFlairNums)\n\n# Password protected Flairbot\n@app.route('/flairbot', methods=['POST'])\n@basic_auth.required\ndef flairbotpage_do():\n #doBotThings(\"flairs\")\n return render_template('flairbot.html', updatedFlairs=True)\n\n@app.route('/flairbot', methods=['GET'])\n@basic_auth.required\ndef flairbotpage_view():\n return render_template('flairbot.html')\n\nif __name__ == \"__main__\":\n app.run()", "sub_path": "router.py", "file_name": "router.py", "file_ext": "py", "file_size_in_byte": 984, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_basicauth.BasicAuth", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "646607059", "text": "import numpy as np\nimport warnings\nfrom scipy.interpolate import interp2d\n\n\ndef interpolate_discrete(lat, long, grid_array, array_latitudes,\n array_longitudes, sector_interpolation=False,\n step=0.01):\n \"\"\"\n Given K 2D-arrays, representing some K-dim magnitude values, interpolates the K values for a given latitude and\n longitude. It can also return the interpolated values for the sector the lat, long pint yields on.\n\n Params:\n ------\n lat: float. Latitude.\n long: float. Longitude.\n grid_array: array. An (M, N, K)-dimensional array containing the\n K-dim values of the magnitude. Each MxN matrix element represents the value for the k_th dimension of that\n magnitude in the m_th, n_th latitude, longitude point on the discrete space.\n array_latitudes: array. An (M)-dimensional vector with the latitude values corresponding to the discrete points of the grid_array.\n array_longitudes: array. An (N)-dimensional vector with the longitude values corresponding to the discrete points\n of the grid_array.\n sector_interpolation: boolean. Default = False. Whether to interpolate the sector or not.\n step: float. Default = 0.01. If interpolating the sector, the step of the new discrete map.\n\n Returns:\n ------\n interpolated_values: array. (K)-dimensional vector with the interpolated values.\n lat_s, long_s: arrays. [ONLY IF sector_interpolation = True] (M')-dim and (N')-dim arrays containing the lats and\n longs of the interpolated sector\n interpolated_sector. 
[ONLY IF sector_interpolation = True] (M', N', K)-dim array containing the interpolated values\n    for the whole sector\n\n    \"\"\"\n\n    # Look for the closest grid points\n    idx = (np.abs(array_latitudes - lat)).argmin()\n    idy = (np.abs(array_longitudes - long)).argmin()\n\n    # Check if the closest grid point is \"above\" or \"below\"\n    if array_latitudes[idx] < lat:\n        idx -= 1\n    if array_longitudes[idy] < long:\n        idy -= 1\n\n    # Select the sector to interpolate\n    small_lat = array_latitudes[idx:idx + 2]\n    small_long = array_longitudes[idy:idy + 2]\n    small_array = grid_array[idx:idx + 2, idy:idy + 2, :]\n\n    k_dimensions = small_array.shape[2]\n\n    # Interpolation for each matrix\n    interpolated_values = np.zeros([k_dimensions])\n    if sector_interpolation:\n        lat_s = np.arange(array_latitudes[idx], array_latitudes[idx + 1], step)\n        long_s = np.arange(array_longitudes[idy], array_longitudes[idy + 1],\n                           step)\n        interpolated_sector = np.zeros([len(lat_s), len(long_s), k_dimensions])\n    for i, dimension in enumerate(np.arange(k_dimensions)):\n        interpolation_function = interp2d(small_lat, small_long,\n                                          small_array[:, :, dimension])\n\n        # Interpolate value\n        interpolated_values[i] = interpolation_function(lat, long)\n\n        if sector_interpolation:\n            interpolated_sector[:, :, i] = interpolation_function(lat_s, long_s)\n\n    if sector_interpolation:\n        return interpolated_values, lat_s, long_s, interpolated_sector\n    else:\n        return interpolated_values\n\n\ndef velocity_composition(boat_velocity, lat, long, stream_velocities,\n                         stream_velocities_latitudes,\n                         stream_velocities_longitudes):\n    \"\"\"\n    Given stream velocities on a discrete grid of latitude and longitude points, and a given boat velocity, composes\n    the sum of both velocities, interpolating the stream velocity for the latitude and longitude\n    position of the boat.\n\n    Params:\n    ------\n    * boat_velocity: array. Vector containing the latitude and longitude\n    components of the boat velocity, in that order.\n    * lat: float. Latitude.\n    * long: float. Longitude.\n    * grid_array: array. An (M, N, K)-dimensional array containing the K-dim\n    values of the magnitude. Each MxN matrix\n    element represents the value for the k_th dimension of that magnitude in\n    the m_th, n_th latitude, longitude point on the discrete space.\n    * array_latitudes: array. An (M)-dimensional vector with the latitude\n    values corresponding to the discrete points of the grid_array.\n    * array_longitudes: array. An (N)-dimensional vector with the longitude\n    values corresponding to the discrete points of the grid_array.\n\n    Returns:\n    ------\n    * velocity. array. A vector with the composed sum of velocities, with the\n    latitude and longitude components in that order.\n    \"\"\"\n    interpolated_stream_velocities = interpolate_discrete(lat, long,\n                                                          stream_velocities,\n                                                          stream_velocities_latitudes,\n                                                          stream_velocities_longitudes)\n    velocity = boat_velocity + interpolated_stream_velocities\n\n    return velocity\n\n\ndef boat_movement(boat_velocity, lat, long, stream_velocities,\n                  stream_velocities_latitudes, stream_velocities_longitudes,\n                  ts):\n    \"\"\"\n    Computes the new position of the boat given the boat velocity, the stream velocity and the time step. The time step\n    is assumed to be so small that the stream velocity does not change from the initial point to the final one.\n\n    Params:\n    ------\n    boat_velocity: array. Vector containing the latitude and longitude components of the boat velocity, in that order.\n    lat: float. Latitude.\n    long: float. Longitude.\n    grid_array: array. 
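A self-contained sketch of the 2x2-sector bilinear interpolation that interpolate_discrete performs for each of the K dimensions (grid values invented). One convention worth noting: scipy's interp2d(x, y, z) expects z shaped (len(y), len(x)), i.e. rows indexed by y, which is why the sketch transposes the value matrix:

# Bilinear interpolation over one 2x2 lat/long sector with scipy's interp2d.
import numpy as np
from scipy.interpolate import interp2d

lats = np.array([10.0, 11.0])
longs = np.array([20.0, 21.0])
values = np.array([[1.0, 2.0],
                   [3.0, 4.0]])  # values[i, j] = value at (lats[i], longs[j])

f = interp2d(lats, longs, values.T)  # transpose: rows must follow `longs`
print(f(10.5, 20.5))  # -> [2.5], the bilinear midpoint of the four corners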
An (M, N, K)-dimensional array containing the K-dim values of the magnitude. Each MxN matrix\n element represents the value for the k_th dimension of that magnitude in the m_th, n_th latitude, longitude point\n on the discrete space.\n array_latitudes: array. An (M)-dimensional vector with the latitude values corresponding to the discrete points of\n the grid_array.\n array_longitudes: array. An (N)-dimensional vector with the longitude values corresponding to the discrete points of\n the grid_array.\n ts: float. The time increment interval [THIS VALUE IS ASSUMED TO BE SMALL SO THE DISPLACEMENT IS SHORT].\n\n Returns:\n ------\n new_latitude, new_longitude. floats. The values for the latitude and longitude new positions.\n\n \"\"\"\n\n velocity = velocity_composition(boat_velocity, lat, long, stream_velocities,\n stream_velocities_latitudes,\n stream_velocities_longitudes)\n new_latitude = lat + velocity[0] * ts\n new_longitude = long + velocity[1] * ts\n return new_latitude, new_longitude\n\n\ndef desired_velocity(initial_point, final_point,\n stream_velocities, stream_velocities_latitudes,\n stream_velocities_longitudes, ts, max_distance=1):\n \"\"\"Given the stream speeds, a couple of points, A and B, assuming that the\n distance between those points is much smaller than the 'stream change\n distance', dL, returns the boat velocity and time spent by the boat to\n reach that point.\n\n Params:\n ------\n initial_point: array. 1D vector, containing the initial point\n coordinates, expressed as latitude and longitude values.\n final_point: array. 1D vector, containing the destination point\n coordinates, expressed as latitude and longitude values.\n grid_array: array. An (M, N, K)-dimensional array containing the K-dim\n values of the magnitude. Each MxN matrix element represents the value\n for the k_th dimension of that magnitude in the m_th, n_th latitude,\n longitude point on the discrete space.\n array_latitudes: array. An (M)-dimensional vector with the latitude values\n corresponding to the discrete points of the grid_array.\n array_longitudes: array. An (N)-dimensional vector with the longitude values\n corresponding to the discrete points of the grid_array.\n time_spent: float. Time spent on that journey.\n max_distance: float. Default = 1. Max distance that can be considered for\n the uniform approximation. In units of grid resolution distance.\n\n Returns:\n ------\n\n boat_velocity: array. 1D Vector containing the latitude and longitude\n components of the boat velocity, in that order.\n \"\"\"\n\n # Ensure distance is small enough for uniform regime\n distance = np.sqrt(np.sum((final_point - initial_point) ** 2))\n\n # Assuming that the grid is squared\n grid_distance = np.abs(stream_velocities_latitudes[1]\n - stream_velocities_latitudes[0])\n\n if distance >= max_distance * grid_distance:\n warnings.warn('Distance is {}, which may be too huge. 
Recommended maximum distance is {}'\n .format(distance, max_distance * grid_distance), Warning, stacklevel=2)\n\n lat = initial_point[0]\n long = initial_point[1]\n\n final_lat = final_point[0]\n final_long = final_point[1]\n\n stream_initial_velocity = interpolate_discrete(\n lat, long,\n stream_velocities,\n stream_velocities_latitudes,\n stream_velocities_longitudes)\n\n stream_final_velocity = interpolate_discrete(\n final_lat, final_long,\n stream_velocities,\n stream_velocities_latitudes,\n stream_velocities_longitudes)\n\n stream_mean_velocity = (stream_initial_velocity +\n stream_final_velocity) / 2\n\n velocity = (final_point - initial_point) / ts\n boat_velocity = velocity - stream_mean_velocity\n\n return boat_velocity\n", "sub_path": "src/velocity.py", "file_name": "velocity.py", "file_ext": "py", "file_size_in_byte": 9616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.abs", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp2d", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 185, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "232981652", "text": "from lib import Dijkstra\nfrom itertools import product\n\ndef main(file):\n\twith open(file, 'r') as f:\n\t\tmatrix = [[int(n) for n in line.rstrip().split(',')] for line in f]\n\n\tsize = len(matrix)\n\tSTART = (0, 0)\n\tFINISH = (size - 1, size - 1)\n\tvertices = product(range(size), repeat=2)\n\n\tdef neighbours(v):\n\t\tx, y = v\n\t\tif x > 0: yield x - 1, y\n\t\tif y > 0: yield x, y - 1\n\t\tif y + 1 < size: yield x, y + 1\n\t\tif x + 1 < size: yield x + 1, y\n\n\tdef distance(u, v):\n\t\tx, y = v\n\t\treturn matrix[y][x]\n\n\treturn Dijkstra(vertices, neighbours, distance, START)[FINISH]\n\nif __name__ == '__main__':\n\tprint(main('083.txt')) # 425185\n", "sub_path": "083.py", "file_name": "083.py", "file_ext": "py", "file_size_in_byte": 616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "itertools.product", "line_number": 11, "usage_type": "call"}, {"api_name": "lib.Dijkstra", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "548853872", "text": "# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nimport numpy as np\nfrom llvmlite import 
binding as ll\nfrom llvmlite import ir as llvmir\nfrom llvmlite.llvmpy import core as lc\nfrom numba import typeof\nfrom numba.core import cgutils, datamodel, types, typing, utils\nfrom numba.core.base import BaseContext\nfrom numba.core.callconv import MinimalCallConv\nfrom numba.core.registry import cpu_target\nfrom numba.core.target_extension import GPU, target_registry\nfrom numba.core.utils import cached_property\n\nfrom numba_dppy.dppy_array_type import DPPYArray, DPPYArrayModel\nfrom numba_dppy.utils import address_space, calling_conv, npytypes_array_to_dppy_array\n\nfrom . import codegen\n\nCC_SPIR_KERNEL = \"spir_kernel\"\nCC_SPIR_FUNC = \"spir_func\"\nVALID_CHARS = re.compile(r\"[^a-z0-9]\", re.I)\nLINK_ATOMIC = 111\nLLVM_SPIRV_ARGS = 112\n\n\nclass DPPYTypingContext(typing.BaseContext):\n \"\"\"A numba_dppy-specific typing context inheriting Numba's ``BaseContext``.\n\n :class:`DPPYTypingContext` is a customized typing context that inherits from\n Numba's ``typing.BaseContext`` class. We add two specific functionalities\n to the basic Numba typing context features: An overridden\n :func:`resolve_argument_type` that changes all ``npytypes.Array`` to\n numba-dppy's :class:`dppy_array_type.DppyArray`. An overridden\n :func:`load_additional_registries` that registers OpenCL math and other\n functions to the typing context.\n\n \"\"\"\n\n def resolve_argument_type(self, val):\n \"\"\"Return the Numba type of a Python value used as a function argument.\n\n Overrides the implementation of ``numba.core.typing.BaseContext`` to\n handle the special case of ``numba.core.types.npytypes.Array``. Whenever\n a NumPy ndarray argument is encountered as an argument to a ``kernel``\n function, it is converted to a ``DPPYArray`` type.\n\n Args:\n val : A Python value that is passed as an argument to a ``kernel``\n function.\n\n Returns: The Numba type corresponding to the Python value.\n\n Raises:\n ValueError: If the type of the Python value is not supported.\n\n \"\"\"\n if type(typeof(val)) is types.npytypes.Array:\n # Convert npytypes.Array to DPPYArray\n return npytypes_array_to_dppy_array(typeof(val))\n else:\n return super().resolve_argument_type(val)\n\n def load_additional_registries(self):\n \"\"\"Register the OpenCL API and math and other functions.\"\"\"\n from numba.core.typing import cmathdecl, npydecl\n\n from .ocl import mathdecl, ocldecl\n\n self.install_registry(ocldecl.registry)\n self.install_registry(mathdecl.registry)\n self.install_registry(cmathdecl.registry)\n self.install_registry(npydecl.registry)\n\n\nclass GenericPointerModel(datamodel.PrimitiveModel):\n def __init__(self, dmm, fe_type):\n adrsp = (\n fe_type.addrspace if fe_type.addrspace is not None else address_space.GLOBAL\n )\n be_type = dmm.lookup(fe_type.dtype).get_data_type().as_pointer(adrsp)\n super(GenericPointerModel, self).__init__(dmm, fe_type, be_type)\n\n\ndef _init_data_model_manager():\n dmm = datamodel.default_manager.copy()\n dmm.register(types.CPointer, GenericPointerModel)\n dmm.register(DPPYArray, DPPYArrayModel)\n return dmm\n\n\nspirv_data_model_manager = _init_data_model_manager()\n\n\nclass SyclDevice(GPU):\n \"\"\"Mark the hardware target as SYCL Device.\"\"\"\n\n pass\n\n\nDPPY_TARGET_NAME = \"SyclDevice\"\n\ntarget_registry[DPPY_TARGET_NAME] = SyclDevice\n\nimport numba_dppy.dppy_offload_dispatcher\n\n\nclass DPPYTargetContext(BaseContext):\n \"\"\"A numba_dppy-specific target context inheriting Numba's ``BaseContext``.\n\n :class:`DPPYTargetContext` is a customized target context that 
inherits\n from Numba's ``numba.core.base.BaseContext`` class. The class defines\n helper functions to mark LLVM functions as SPIR-V kernels. The class also\n registers OpenCL math and API functions, helper functions for inserting\n LLVM address space cast instructions, and other functionalities used by\n numba_dppy's compiler passes.\n\n \"\"\"\n\n implement_powi_as_math_call = True\n\n def _gen_arg_addrspace_md(self, fn):\n \"\"\"Generate kernel_arg_addr_space metadata.\"\"\"\n mod = fn.module\n fnty = fn.type.pointee\n codes = []\n\n for a in fnty.args:\n if cgutils.is_pointer(a):\n codes.append(address_space.GLOBAL)\n else:\n codes.append(address_space.PRIVATE)\n\n consts = [lc.Constant.int(lc.Type.int(), x) for x in codes]\n name = lc.MetaDataString.get(mod, \"kernel_arg_addr_space\")\n return lc.MetaData.get(mod, [name] + consts)\n\n def _gen_arg_access_qual_md(self, fn):\n \"\"\"Generate kernel_arg_access_qual metadata.\"\"\"\n mod = fn.module\n consts = [lc.MetaDataString.get(mod, \"none\")] * len(fn.args)\n name = lc.MetaDataString.get(mod, \"kernel_arg_access_qual\")\n return lc.MetaData.get(mod, [name] + consts)\n\n def _gen_arg_type(self, fn):\n \"\"\"Generate kernel_arg_type metadata.\"\"\"\n mod = fn.module\n fnty = fn.type.pointee\n consts = [lc.MetaDataString.get(mod, str(a)) for a in fnty.args]\n name = lc.MetaDataString.get(mod, \"kernel_arg_type\")\n return lc.MetaData.get(mod, [name] + consts)\n\n def _gen_arg_type_qual(self, fn):\n \"\"\"Generate kernel_arg_type_qual metadata.\"\"\"\n mod = fn.module\n fnty = fn.type.pointee\n consts = [lc.MetaDataString.get(mod, \"\") for _ in fnty.args]\n name = lc.MetaDataString.get(mod, \"kernel_arg_type_qual\")\n return lc.MetaData.get(mod, [name] + consts)\n\n def _gen_arg_base_type(self, fn):\n \"\"\"Generate kernel_arg_base_type metadata.\"\"\"\n mod = fn.module\n fnty = fn.type.pointee\n consts = [lc.MetaDataString.get(mod, str(a)) for a in fnty.args]\n name = lc.MetaDataString.get(mod, \"kernel_arg_base_type\")\n return lc.MetaData.get(mod, [name] + consts)\n\n def _finalize_wrapper_module(self, fn):\n \"\"\"Add metadata and calling convention to the wrapper function.\n\n The helper function adds function metadata to the wrapper function and\n also module level metadata to the LLVM module containing the wrapper.\n We also make sure the wrapper function has ``spir_kernel`` calling\n convention, without which the function cannot be used as a kernel.\n\n Args:\n fn: LLVM function representing the \"kernel\" wrapper function.\n\n \"\"\"\n mod = fn.module\n # Set norecurse\n fn.attributes.add(\"norecurse\")\n # Set SPIR kernel calling convention\n fn.calling_convention = CC_SPIR_KERNEL\n\n # Mark kernels\n ocl_kernels = mod.get_or_insert_named_metadata(\"opencl.kernels\")\n ocl_kernels.add(\n lc.MetaData.get(\n mod,\n [\n fn,\n self._gen_arg_addrspace_md(fn),\n self._gen_arg_access_qual_md(fn),\n self._gen_arg_type(fn),\n self._gen_arg_type_qual(fn),\n self._gen_arg_base_type(fn),\n ],\n )\n )\n\n # Other metadata\n empty_md = lc.MetaData.get(mod, ())\n others = [\n \"opencl.used.extensions\",\n \"opencl.used.optional.core.features\",\n \"opencl.compiler.options\",\n ]\n\n for name in others:\n nmd = mod.get_or_insert_named_metadata(name)\n if not nmd.operands:\n nmd.add(empty_md)\n\n def _generate_kernel_wrapper(self, func, argtypes):\n module = func.module\n arginfo = self.get_arg_packer(argtypes)\n wrapperfnty = lc.Type.function(lc.Type.void(), arginfo.argument_types)\n wrapper_module = 
self.create_module(\"dppy.kernel.wrapper\")\n wrappername = \"dppyPy_{name}\".format(name=func.name)\n argtys = list(arginfo.argument_types)\n fnty = lc.Type.function(\n lc.Type.int(),\n [self.call_conv.get_return_type(types.pyobject)] + argtys,\n )\n func = wrapper_module.add_function(fnty, name=func.name)\n func.calling_convention = CC_SPIR_FUNC\n wrapper = wrapper_module.add_function(wrapperfnty, name=wrappername)\n builder = lc.Builder(wrapper.append_basic_block(\"\"))\n\n callargs = arginfo.from_arguments(builder, wrapper.args)\n\n # XXX handle error status\n status, _ = self.call_conv.call_function(\n builder, func, types.void, argtypes, callargs\n )\n builder.ret_void()\n\n self._finalize_wrapper_module(wrapper)\n\n # Link the spir_func module to the wrapper module\n module.link_in(ll.parse_assembly(str(wrapper_module)))\n # Make sure the spir_func has internal linkage to be inlinable.\n func.linkage = \"internal\"\n wrapper = module.get_function(wrapper.name)\n module.get_function(func.name).linkage = \"internal\"\n return wrapper\n\n def __init__(self, typingctx, target=DPPY_TARGET_NAME):\n super().__init__(typingctx, target)\n\n def init(self):\n self._internal_codegen = codegen.JITSPIRVCodegen(\"numba_dppy.jit\")\n self._target_data = ll.create_target_data(\n codegen.SPIR_DATA_LAYOUT[utils.MACHINE_BITS]\n )\n # Override data model manager to SPIR model\n import numba.cpython.unicode\n\n self.data_model_manager = _init_data_model_manager()\n self.extra_compile_options = dict()\n\n import copy\n\n from numba.np.ufunc_db import _lazy_init_db\n\n _lazy_init_db()\n from numba.np.ufunc_db import _ufunc_db as ufunc_db\n\n self.ufunc_db = copy.deepcopy(ufunc_db)\n self.cpu_context = cpu_target.target_context\n\n # Overrides\n def create_module(self, name):\n return self._internal_codegen._create_empty_module(name)\n\n def replace_numpy_ufunc_with_opencl_supported_functions(self):\n from numba_dppy.ocl.mathimpl import lower_ocl_impl, sig_mapper\n\n ufuncs = [\n (\"fabs\", np.fabs),\n (\"exp\", np.exp),\n (\"log\", np.log),\n (\"log10\", np.log10),\n (\"expm1\", np.expm1),\n (\"log1p\", np.log1p),\n (\"sqrt\", np.sqrt),\n (\"sin\", np.sin),\n (\"cos\", np.cos),\n (\"tan\", np.tan),\n (\"asin\", np.arcsin),\n (\"acos\", np.arccos),\n (\"atan\", np.arctan),\n (\"atan2\", np.arctan2),\n (\"sinh\", np.sinh),\n (\"cosh\", np.cosh),\n (\"tanh\", np.tanh),\n (\"asinh\", np.arcsinh),\n (\"acosh\", np.arccosh),\n (\"atanh\", np.arctanh),\n (\"ldexp\", np.ldexp),\n (\"floor\", np.floor),\n (\"ceil\", np.ceil),\n (\"trunc\", np.trunc),\n (\"hypot\", np.hypot),\n (\"exp2\", np.exp2),\n (\"log2\", np.log2),\n ]\n\n for name, ufunc in ufuncs:\n for sig in self.ufunc_db[ufunc].keys():\n if sig in sig_mapper and (name, sig_mapper[sig]) in lower_ocl_impl:\n self.ufunc_db[ufunc][sig] = lower_ocl_impl[(name, sig_mapper[sig])]\n\n def load_additional_registries(self):\n \"\"\"Register OpenCL functions into numba-dppy's target context.\n\n To make sure we are calling supported OpenCL math functions, we\n replace some of NUMBA's NumPy ufunc with OpenCL versions of those\n functions. The replacement is done after the OpenCL functions have\n been registered into the target context.\n\n \"\"\"\n from numba.np import npyimpl\n\n from . 
import printimpl\n from .ocl import mathimpl, oclimpl\n\n self.insert_func_defn(oclimpl.registry.functions)\n self.insert_func_defn(mathimpl.registry.functions)\n self.insert_func_defn(npyimpl.registry.functions)\n self.install_registry(printimpl.registry)\n # Replace NumPy functions with their OpenCL versions.\n self.replace_numpy_ufunc_with_opencl_supported_functions()\n\n @cached_property\n def call_conv(self):\n return DPPYCallConv(self)\n\n def codegen(self):\n return self._internal_codegen\n\n @property\n def target_data(self):\n return self._target_data\n\n def mangler(self, name, argtypes):\n def repl(m):\n ch = m.group(0)\n return \"_%X_\" % ord(ch)\n\n qualified = name + \".\" + \".\".join(str(a) for a in argtypes)\n mangled = VALID_CHARS.sub(repl, qualified)\n return \"dppy_py_devfn_\" + mangled\n\n def prepare_ocl_kernel(self, func, argtypes):\n module = func.module\n func.linkage = \"linkonce_odr\"\n module.data_layout = codegen.SPIR_DATA_LAYOUT[self.address_size]\n wrapper = self._generate_kernel_wrapper(func, argtypes)\n return wrapper\n\n def mark_ocl_device(self, func):\n # Adapt to SPIR\n func.calling_convention = CC_SPIR_FUNC\n func.linkage = \"linkonce_odr\"\n return func\n\n def declare_function(self, module, fndesc):\n \"\"\"Create the LLVM function from a ``numba_dppy.kernel`` decorated\n function.\n\n Args:\n module (llvmlite.llvmpy.core.Module) : The LLVM module into which\n the kernel function will be inserted.\n fndesc (numba.core.funcdesc.PythonFunctionDescriptor) : The\n signature of the function.\n\n Returns:\n llvmlite.ir.values.Function: The reference to the LLVM Function\n that was inserted into the module.\n\n \"\"\"\n fnty = self.call_conv.get_function_type(fndesc.restype, fndesc.argtypes)\n fn = module.get_or_insert_function(fnty, name=fndesc.mangled_name)\n if not self.enable_debuginfo:\n fn.attributes.add(\"alwaysinline\")\n ret = super(DPPYTargetContext, self).declare_function(module, fndesc)\n ret.calling_convention = calling_conv.CC_SPIR_FUNC\n return ret\n\n def insert_const_string(self, mod, string):\n \"\"\"Create a global string from the passed in string argument and return\n a void* in the GENERIC address space pointing to that string.\n\n Args:\n mod: LLVM module where the global string value is to be inserted.\n string: A Python string that will be converted to a global constant\n string and inserted into the module.\n\n Returns: A LLVM Constant pointing to the global string value inserted\n into the module.\n\n \"\"\"\n text = cgutils.make_bytearray(string.encode(\"utf-8\") + b\"\\x00\")\n\n name = \"$\".join([\"__conststring__\", self.mangler(string, [\"str\"])])\n\n # Try to reuse existing global\n gv = mod.globals.get(name)\n if gv is None:\n # Not defined yet\n gv = cgutils.add_global_variable(\n mod, text.type, name=name, addrspace=address_space.GENERIC\n )\n gv.linkage = \"internal\"\n gv.global_constant = True\n gv.initializer = text\n\n # Cast to a i8* pointer\n charty = gv.type.pointee.element\n return gv.bitcast(charty.as_pointer(address_space.GENERIC))\n\n def addrspacecast(self, builder, src, addrspace):\n \"\"\"Insert an LLVM addressspace cast instruction into the module.\n\n FIXME: Move this function into utils.\n\n \"\"\"\n ptras = llvmir.PointerType(src.type.pointee, addrspace=addrspace)\n return builder.addrspacecast(src, ptras)\n\n # Overrides\n def get_ufunc_info(self, ufunc_key):\n return self.ufunc_db[ufunc_key]\n\n\nclass DPPYCallConv(MinimalCallConv):\n \"\"\"Custom calling convention class used by numba-dppy.\n\n 
Numba-dppy's calling convention derives from\n    :class:`numba.core.callconv.MinimalCallConv`. The\n    :class:`DPPYCallConv` overrides :func:`call_function`.\n\n    \"\"\"\n\n    def call_function(self, builder, callee, resty, argtys, args, env=None):\n        \"\"\"Call the Numba-compiled *callee*.\"\"\"\n        assert env is None\n        retty = callee.args[0].type.pointee\n        retvaltmp = cgutils.alloca_once(builder, retty)\n        # initialize return value\n        builder.store(cgutils.get_null_value(retty), retvaltmp)\n        arginfo = self.context.get_arg_packer(argtys)\n        args = arginfo.as_arguments(builder, args)\n        realargs = [retvaltmp] + list(args)\n        code = builder.call(callee, realargs)\n        status = self._get_return_status(builder, code)\n        retval = builder.load(retvaltmp)\n        out = self.context.get_returned_value(builder, resty, retval)\n        return status, out\n", "sub_path": "numba_dppy/target.py", "file_name": "target.py", "file_ext": "py", "file_size_in_byte": 17225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "re.I", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numba.core.typing.BaseContext", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numba.core.typing", "line_number": 41, "usage_type": "name"}, {"api_name": "numba.typeof", "line_number": 72, "usage_type": "call"}, {"api_name": "numba.core.types.npytypes", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numba.core.types", "line_number": 72, "usage_type": "name"}, {"api_name": "numba_dppy.utils.npytypes_array_to_dppy_array", "line_number": 74, "usage_type": "call"}, {"api_name": "numba.typeof", "line_number": 74, "usage_type": "call"}, {"api_name": "ocl.ocldecl.registry", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ocl.ocldecl", "line_number": 84, "usage_type": "name"}, {"api_name": "ocl.mathdecl.registry", "line_number": 85, "usage_type": "attribute"}, {"api_name": "ocl.mathdecl", "line_number": 85, "usage_type": "name"}, {"api_name": "numba.core.typing.cmathdecl.registry", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numba.core.typing.cmathdecl", "line_number": 86, "usage_type": "name"}, {"api_name": "numba.core.typing.npydecl.registry", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numba.core.typing.npydecl", "line_number": 87, "usage_type": "name"}, {"api_name": "numba.core.datamodel.PrimitiveModel", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numba.core.datamodel", "line_number": 90, "usage_type": "name"}, {"api_name": "numba_dppy.utils.address_space.GLOBAL", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numba_dppy.utils.address_space", "line_number": 93, "usage_type": "name"}, {"api_name": "numba.core.datamodel.default_manager.copy", "line_number": 100, "usage_type": "call"}, {"api_name": "numba.core.datamodel.default_manager", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numba.core.datamodel", "line_number": 100, "usage_type": "name"}, {"api_name": "numba.core.types.CPointer", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numba.core.types", "line_number": 101, "usage_type": "name"}, {"api_name": "numba_dppy.dppy_array_type.DPPYArray", "line_number": 102, "usage_type": "argument"}, {"api_name": "numba_dppy.dppy_array_type.DPPYArrayModel", "line_number": 102, "usage_type": "argument"}, {"api_name": "numba.core.target_extension.GPU", "line_number": 109, "usage_type": 
"name"}, {"api_name": "numba.core.target_extension.target_registry", "line_number": 117, "usage_type": "name"}, {"api_name": "numba.core.base.BaseContext", "line_number": 122, "usage_type": "name"}, {"api_name": "numba.core.cgutils.is_pointer", "line_number": 143, "usage_type": "call"}, {"api_name": "numba.core.cgutils", "line_number": 143, "usage_type": "name"}, {"api_name": "numba_dppy.utils.address_space.GLOBAL", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numba_dppy.utils.address_space", "line_number": 144, "usage_type": "name"}, {"api_name": "numba_dppy.utils.address_space.PRIVATE", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numba_dppy.utils.address_space", "line_number": 146, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.Constant.int", "line_number": 148, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.Constant", "line_number": 148, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 148, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.Type.int", "line_number": 148, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.Type", "line_number": 148, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 149, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 149, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 149, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaData.get", "line_number": 150, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaData", "line_number": 150, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 150, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 155, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 155, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 155, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 156, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 156, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 156, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaData.get", "line_number": 157, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaData", "line_number": 157, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 157, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 163, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 163, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 163, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 164, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 164, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 164, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaData.get", "line_number": 165, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaData", "line_number": 165, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 165, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 171, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", 
"line_number": 171, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 171, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 172, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 172, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 172, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaData.get", "line_number": 173, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaData", "line_number": 173, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 173, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 179, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 179, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 179, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString.get", "line_number": 180, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaDataString", "line_number": 180, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 180, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaData.get", "line_number": 181, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaData", "line_number": 181, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 181, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaData.get", "line_number": 204, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaData", "line_number": 204, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 204, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.MetaData.get", "line_number": 218, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.MetaData", "line_number": 218, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 218, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.Type.function", "line_number": 233, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.Type", "line_number": 233, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 233, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.Type.void", "line_number": 233, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.Type.function", "line_number": 237, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.Type", "line_number": 237, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 237, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.Type.int", "line_number": 238, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core.Type", "line_number": 238, "usage_type": "attribute"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 238, "usage_type": "name"}, {"api_name": "numba.core.types.pyobject", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numba.core.types", "line_number": 239, "usage_type": "name"}, {"api_name": "llvmlite.llvmpy.core.Builder", "line_number": 244, "usage_type": "call"}, {"api_name": "llvmlite.llvmpy.core", "line_number": 244, "usage_type": "name"}, {"api_name": "numba.core.types.void", "line_number": 250, "usage_type": "attribute"}, {"api_name": "numba.core.types", "line_number": 250, "usage_type": "name"}, {"api_name": "llvmlite.binding.parse_assembly", "line_number": 257, "usage_type": "call"}, {"api_name": "llvmlite.binding", "line_number": 257, 
"usage_type": "name"}, {"api_name": "llvmlite.binding.create_target_data", "line_number": 269, "usage_type": "call"}, {"api_name": "llvmlite.binding", "line_number": 269, "usage_type": "name"}, {"api_name": "numba.core.utils.MACHINE_BITS", "line_number": 270, "usage_type": "attribute"}, {"api_name": "numba.core.utils", "line_number": 270, "usage_type": "name"}, {"api_name": "numba.np.ufunc_db._lazy_init_db", "line_number": 282, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 285, "usage_type": "call"}, {"api_name": "numba.np.ufunc_db._ufunc_db", "line_number": 285, "usage_type": "name"}, {"api_name": "numba.core.registry.cpu_target.target_context", "line_number": 286, "usage_type": "attribute"}, {"api_name": "numba.core.registry.cpu_target", "line_number": 286, "usage_type": "name"}, {"api_name": "numpy.fabs", "line_number": 296, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 297, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 298, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 299, "usage_type": "attribute"}, {"api_name": "numpy.expm1", "line_number": 300, "usage_type": "attribute"}, {"api_name": "numpy.log1p", "line_number": 301, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 302, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 303, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 304, "usage_type": "attribute"}, {"api_name": "numpy.tan", "line_number": 305, "usage_type": "attribute"}, {"api_name": "numpy.arcsin", "line_number": 306, "usage_type": "attribute"}, {"api_name": "numpy.arccos", "line_number": 307, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 308, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 309, "usage_type": "attribute"}, {"api_name": "numpy.sinh", "line_number": 310, "usage_type": "attribute"}, {"api_name": "numpy.cosh", "line_number": 311, "usage_type": "attribute"}, {"api_name": "numpy.tanh", "line_number": 312, "usage_type": "attribute"}, {"api_name": "numpy.arcsinh", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.arccosh", "line_number": 314, "usage_type": "attribute"}, {"api_name": "numpy.arctanh", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.ldexp", "line_number": 316, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 317, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 318, "usage_type": "attribute"}, {"api_name": "numpy.trunc", "line_number": 319, "usage_type": "attribute"}, {"api_name": "numpy.hypot", "line_number": 320, "usage_type": "attribute"}, {"api_name": "numpy.exp2", "line_number": 321, "usage_type": "attribute"}, {"api_name": "numpy.log2", "line_number": 322, "usage_type": "attribute"}, {"api_name": "numba_dppy.ocl.mathimpl.sig_mapper", "line_number": 327, "usage_type": "name"}, {"api_name": "numba_dppy.ocl.mathimpl.lower_ocl_impl", "line_number": 327, "usage_type": "name"}, {"api_name": "numba_dppy.ocl.mathimpl.lower_ocl_impl", "line_number": 328, "usage_type": "name"}, {"api_name": "numba_dppy.ocl.mathimpl.sig_mapper", "line_number": 328, "usage_type": "name"}, {"api_name": "ocl.oclimpl.registry", "line_number": 344, "usage_type": "attribute"}, {"api_name": "ocl.oclimpl", "line_number": 344, "usage_type": "name"}, {"api_name": "ocl.mathimpl.registry", "line_number": 345, "usage_type": "attribute"}, {"api_name": "ocl.mathimpl", "line_number": 345, "usage_type": 
"name"}, {"api_name": "numba.np.npyimpl.registry", "line_number": 346, "usage_type": "attribute"}, {"api_name": "numba.np.npyimpl", "line_number": 346, "usage_type": "name"}, {"api_name": "numba.core.utils.cached_property", "line_number": 351, "usage_type": "name"}, {"api_name": "numba_dppy.utils.calling_conv.CC_SPIR_FUNC", "line_number": 404, "usage_type": "attribute"}, {"api_name": "numba_dppy.utils.calling_conv", "line_number": 404, "usage_type": "name"}, {"api_name": "numba.core.cgutils.make_bytearray", "line_number": 420, "usage_type": "call"}, {"api_name": "numba.core.cgutils", "line_number": 420, "usage_type": "name"}, {"api_name": "numba.core.cgutils.add_global_variable", "line_number": 428, "usage_type": "call"}, {"api_name": "numba.core.cgutils", "line_number": 428, "usage_type": "name"}, {"api_name": "numba_dppy.utils.address_space.GENERIC", "line_number": 429, "usage_type": "attribute"}, {"api_name": "numba_dppy.utils.address_space", "line_number": 429, "usage_type": "name"}, {"api_name": "numba_dppy.utils.address_space.GENERIC", "line_number": 437, "usage_type": "attribute"}, {"api_name": "numba_dppy.utils.address_space", "line_number": 437, "usage_type": "name"}, {"api_name": "llvmlite.ir.PointerType", "line_number": 445, "usage_type": "call"}, {"api_name": "llvmlite.ir", "line_number": 445, "usage_type": "name"}, {"api_name": "numba.core.callconv.MinimalCallConv", "line_number": 453, "usage_type": "name"}, {"api_name": "numba.core.cgutils.alloca_once", "line_number": 466, "usage_type": "call"}, {"api_name": "numba.core.cgutils", "line_number": 466, "usage_type": "name"}, {"api_name": "numba.core.cgutils.get_null_value", "line_number": 468, "usage_type": "call"}, {"api_name": "numba.core.cgutils", "line_number": 468, "usage_type": "name"}]} +{"seq_id": "542234412", "text": "import pymysql\n# from sshtunnel import SSHTunnelForwarder\nimport os\n\nroot = {'host': '149.166.99.237',\n 'database': 'indot_db_10-08-2020',\n 'user': 'iuindot',\n 'pass': 'indot_Passwd_2019'}\n\nother = {'user': 'root', 'pass': 'zanpaktao'}\n\ntables = {\n\n 'ip_address':\n \"ip_id` int(11) NOT NULL,\"\n \"camera_id` int(11) DEFAULT NULL,\"\n \"stream_link` varchar(45) DEFAULT NULL,\"\n \"created_date` datetime DEFAULT NULL,\"\n \"updated_date` datetime DEFAULT NULL,\"\n \"PRIMARY KEY (`ip_id`),\"\n \"KEY `camera_id_idx` (`camera_id`),\"\n \"CONSTRAINT `camera_id` FOREIGN KEY (`camera_id`) REFERENCES `camera_list` (`camera_id`),\"\n \") ENGINE=InnoDB DEFAULT CHARSET=utf8\",\n\n}\n\nfields = {\n\n\n 'ip_address':\n \"ip_id,\"\n \"camera_id,\"\n \"stream_link,\"\n \"created_date,\"\n \"updated_date\",\n\n\n}\n\nsql_queries = dict()\n\n\ndef init_insert_queries():\n global sql_queries\n for table_name, columns in fields.items():\n q = \"INSERT INTO \" + table_name + \" (\" + columns + \") VALUES \" + \\\n \"(\" + \",\".join([\"%s\"] * len(columns.split(\",\"))) + \")\"\n sql_queries[table_name] = q\n\n\ndef get_connection():\n print(\"Connecting...\")\n # tunnel = None\n # try:\n # tunnel = SSHTunnelForwarder(\n # ('in-engr-indot.engr.iupui.edu', 22),\n # ssh_username='zd2',\n # ssh_password='Allan@870630zmd',\n # remote_bind_address=('127.0.0.1', 3306),\n # )\n # except:\n # print('false')\n # print(type(tunnel))\n # tunnel.start()\n cnx = pymysql.connect(user=root.get(\"user\"),\n password=root.get(\"pass\"),\n database=root.get(\"database\"),\n host=root.get(\"host\"),\n port=3306, # tunnel.local_bind_port,\n )\n print(\"Connected\")\n init_insert_queries()\n return cnx\n\n\ndef 
create_tables(cnx_cursor):\n for table in tables:\n cnx_cursor.execute(table)\n\n\ndef update_tables(cnx, cnx_cursor, queries):\n global sql_queries\n for q in queries:\n cnx_cursor.execute(sql_queries[q[0]], q[1:])\n cnx.commit()\n\n\ndef delete_entries(cnx_cursor, table_names, conditions):\n for name in table_names:\n cnx_cursor.execute(\"DELETE FROM \" + name + ((\"WHERE \" + conditions[name]) if name in conditions else ''))\n\n\ndef drop_all_tables(cnx_cursor):\n cnx_cursor.execute(\"SET FOREIGN_KEY_CHECKS = 0\")\n for table in fields:\n cnx_cursor.execute(\"DROP TABLE IF EXISTS \" + table)\n\n\ndef save_static_tables(folder):\n cnx, t = get_connection()\n cursor = cnx.cursor()\n cursor.execute(\"SELECT * FROM camera\")\n with open(os.path.join(folder, \"camera.txt\"), \"wt\") as f:\n for i in cursor.fetchall():\n f.write(str(i))\n f.write('\\n')\n cursor.execute(\"SELECT * FROM road\")\n with open(os.path.join(folder, \"road.txt\"), \"wt\") as f:\n for i in cursor.fetchall():\n f.write(str(i))\n f.write('\\n')\n cursor.execute(\"SELECT * FROM location\")\n with open(os.path.join(folder, \"location.txt\"), \"wt\") as f:\n for i in cursor.fetchall():\n f.write(str(i))\n f.write('\\n')\n cursor.execute(\"SELECT * FROM ip_address\")\n with open(os.path.join(folder, \"ip_address.txt\"), \"wt\") as f:\n for i in cursor.fetchall():\n f.write(str(i))\n f.write('\\n')\n\n\ndef get_table_names(cnx_cursor):\n cnx_cursor.execute(\"SHOW TABLES\")\n return [table for (table,) in cnx_cursor.fetchall()]\n\n\ndef insert(cnx, cnx_cursor, query):\n cnx_cursor.execute(sql_queries[query[0]], tuple(query[1:]))\n cnx.commit()\n\n\ndef insert_all(cnx, cnx_cursor, queries):\n for q in queries:\n insert(cnx, cnx_cursor, q)\n\n\nif __name__ == '__main__':\n cursor.execute(\"SELECT * FROM lane_condition\")\n # lc = cursor.fetchall()\n # for i in lc:\n # print(i)\n# cursor.execute(\n# \"SELECT flow_rate, density \"\n# \"FROM lane_condition \"\n# 'WHERE camera_location_view_road_id IN (SELECT camera_id FROM ip_address WHERE ip_address=\"rtsp://10.10.0.211/\"), '\n# 'location_id IN (SELECT location_id FROM ip_address WHERE ip_address=\"rtsp://10.10.0.211/\")', multi=True)\n# for i in cursor.fetchall():\n# print(i)\n# cursor.execute(\"SELECT * FROM camera\")\n# entries = cursor.fetchall()\n# for e in entries:\n# print(e)\n# cursor.execute(\"SELECT * FROM road\")\n# entries = cursor.fetchall()\n# for e in entries:\n# print(e)\n# cursor.execute(\"SELECT * FROM location\")\n# entries = cursor.fetchall()\n# for e in entries:\n# print(e)\n# cursor.execute(\"SELECT * FROM camera_location_view\")\n# entries = cursor.fetchall()\n# for e in entries:\n# print(e)\n# cursor.execute(\"SELECT * FROM camera_location_view_road\")\n# entries = cursor.fetchall()\n# for e in entries:\n# print(e)\n# drop_tables(cursor)\n# create_tables(cursor)\n#\n# insert(cnx, cursor, ('camera', \"9C353B5F4CD7\", \"rtsp://10.10.0.211/\", \"Logtech\", \"T35001\", \"1080\", \"1296\", \"140\", \"25\",\n# \"2018-04-08 13:45:45\", \"2019-04-08 13:45:45\", \"active\"))\n# insert(cnx, cursor, ('camera', \"9C353B5F4CD7\", \"rtsp://10.10.0.32:8554/swVideo\", \"Logtech\", \"T35001\", \"1080\", \"1296\",\n# \"140\", \"25\", \"2018-04-08 13:45:45\", \"2019-04-08 13:45:45\", \"active\"))\n# insert(cnx, cursor, ('camera', \"9C353B5F4CD7\", \"rtsp://10.10.0.43:8554/swVideo\", \"Logtech\", \"T35001\", \"1080\", \"1296\",\n# \"140\", \"25\", \"2018-04-08 13:45:45\", \"2019-04-08 13:45:45\", \"active\"))\n#\n# insert(cnx, cursor, ('road', \"I-65\", \"I-65 @ Southern 
Ave. - Mile 108.4\"))\n# insert(cnx, cursor, ('road', \"I-65\", \"Camera 469 I-65 @ 248.0\"))\n# insert(cnx, cursor, ('road', \"I-65\", \"Camera 472 I-65 @ 235.0\"))\n#\n# insert(cnx, cursor, ('location', \"Southern Ave\", \"39.727772\", \"-86.135454\", None, None))\n# insert(cnx, cursor, ('location', None, \"0\", \"0\", None, None))\n# insert(cnx, cursor, ('location', None, \"0\", \"0\", None, None))\n#\n# insert(cnx, cursor, ('ip_address', \"rtsp://10.10.0.211/\", \"1\", \"1\"))\n# insert(cnx, cursor, ('ip_address', \"rtsp://10.10.0.32:8554/swVideo\", \"2\", \"2\"))\n# insert(cnx, cursor, ('ip_address', \"rtsp://10.10.0.43:8554/swVideo\", \"3\", \"3\"))\n#\n#\n# insert(cnx, cursor, ('camera_location_view', '1', '1', '0', '30', '40', '80', '320', '2018-04-08 13:45:45'))\n# insert(cnx, cursor, ('camera_location_view', '2', '2', '0', '30', '40', '80', '320', '2018-04-08 13:45:45'))\n# insert(cnx, cursor, ('camera_location_view', '3', '3', '0', '30', '40', '80', '320', '2018-04-08 13:45:45'))\n#\n# insert(cnx, cursor, ('camera_location_view_road', '1', '1', 'highway', '70', '3', None, None, None, None, None, None))\n# insert(cnx, cursor, ('camera_location_view_road', '2', '2', 'highway', '70', '3', None, None, None, None, None, None))\n# insert(cnx, cursor, ('camera_location_view_road', '3', '3', 'highway', '70', '3', None, None, None, None, None, None))\n", "sub_path": "database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 6940, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pymysql.connect", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}]} +{"seq_id": "315204510", "text": "from copy import copy\nfrom functools import reduce\n\nfrom colorama import Back, Style\n\nfrom .array import random_array\nfrom .const import LINE_SEPARATOR\n\n\ndef test_sort(sort_func, test_arr_length=10, **kwargs):\n if not test_sort.test_array or \\\n test_sort.test_arr_length != test_arr_length:\n test_sort.test_arr_length = test_arr_length\n test_sort.test_array = random_array(test_arr_length)\n test_arr = copy(test_sort.test_array)\n print(LINE_SEPARATOR)\n print('func:', sort_func.__name__, '\\narg:\\n', test_arr)\n increase = kwargs.get('increase', None)\n if increase:\n sort_func(test_arr, increase=increase)\n else:\n sort_func(test_arr)\n print('result\\n', test_arr)\n\n increase = True if increase is None else increase\n\n successful = True\n for i in range(len(test_arr) - 1):\n if increase:\n if test_arr[i] > test_arr[i + 1]:\n successful = False\n break\n else:\n if test_arr[i] < test_arr[i + 1]:\n successful = False\n break\n if successful:\n print(Back.GREEN, 'PASSED')\n else:\n print(Back.RED, 'FAILED')\n print(Style.RESET_ALL)\n\n\ntest_sort.test_array = None\n", "sub_path": "utils/test_misc_func.py", "file_name": "test_misc_func.py", "file_ext": "py", "file_size_in_byte": 1239, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "array.random_array", "line_number": 14, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 15, "usage_type": "call"}, {"api_name": "const.LINE_SEPARATOR", "line_number": 16, "usage_type": "argument"}, {"api_name": "colorama.Back.GREEN", "line_number": 38, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 38, "usage_type": "name"}, {"api_name": "colorama.Back.RED", "line_number": 40, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 40, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "9896422", "text": "import mysql.connector\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom sklearn import tree\n\n#function for Grabing data from website and save to mysql database\ndef grab_from_site_and_savemysql(n):\n\n list_rgn = list()\n list_yb = list()\n list_sqrm = list()\n list_prc = list()\n\n for i in range(1, n):\n url = 'https://shabesh.com/search/' + str(i) + \"خرید-فروش/املاک/ایران/\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n\n rgns = soup.find_all('h2', attrs={'class': 'announce-desc medium-sans pb-2'}) # و متراژ پیدا کردن مناطق\n for rgn_m in rgns:\n rg = re.findall(r'در\\s(.*)\\،', rgn_m.text)\n list_rgn.extend(rg)\n m = re.findall(r\"آپارتمان\\s(\\d+)\", rgn_m.text)\n if len(m)==0:\n list_sqrm.extend('0')\n else:\n list_sqrm.extend(m)\n\n built_years_meter = soup.find_all('li') # پیدا کردن سال ساخت\n for bym in built_years_meter:\n yb = re.findall(r'ساخت(\\s\\d{4})', bym.text)\n if len(yb) != 0:\n list_yb.extend(yb)\n\n all_price = soup.find_all('span', attrs={'class': 'rent pb-2'})\n for prc in all_price:\n cost = re.findall(r'\\d*\\,*\\d+\\,*\\d+\\,*\\d+', prc.text)\n list_prc.extend(cost)\n\n list_rgn.reverse()\n print(list_yb)\n print(list_rgn)\n print(list_sqrm)\n print(list_prc)\n print(len(list_yb), len(list_rgn), len(list_sqrm), len(list_prc)) # functio\n\n for i in range(0, len(list_yb)):\n cnx = mysql.connector.connect(user='root', password='apolo11',\n host='127.0.0.1',\n database='project_advance')\n cursor = cnx.cursor()\n cursor.execute('INSERT INTO project VALUES(\\'%s\\',\\'%i\\',\\'%i\\',\\'%s\\')' % (\n list_rgn[i], int(list_yb[i]), int(list_sqrm[i]), list_prc[i]))\n cnx.commit()\n cnx.close()\n\n\ncnx = mysql.connector.connect(user='ehsan', password='apolo11',\n host='127.0.0.1',\n database='project_advance')\ncursor=cnx.cursor()\nquery = 'SELECT * FROM project'\ncursor.execute(query)\nlist_data=list()\n\nfor data in cursor:\n list_data.append(data)\nY = list()\nX=list()\n\nfor i in range(0,len(list_data)):\n X.append(list_data[i][0:3])\n Y.append(list_data[i][3])\n\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X, Y)", "sub_path": "Projectfinal.py", "file_name": "Projectfinal.py", "file_ext": "py", "file_size_in_byte": 2530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 22, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 24, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 32, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, 
{"api_name": "mysql.connector.connector.connect", "line_number": 49, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 49, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 49, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 59, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 59, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 59, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 76, "usage_type": "name"}]} +{"seq_id": "275571773", "text": "import tensorflow as tf\nimport numpy as np\nimport cleverhans\nfrom cleverhans.model import Model\nfrom cleverhans.attacks import *\nfrom utils import load_model, read_tensor_from_image_file\nimport base64\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport scipy.misc\n\n\nclass InceptionCNNModel(Model):\n model_path = './models/output_graph_2.pb'\n\n def __init__(self):\n super(InceptionCNNModel, self).__init__()\n\n # Load trained model\n load_model(self.model_path)\n # Save input and output tensors references\n graph = tf.get_default_graph()\n self.input_tensor = graph.get_tensor_by_name('Mul:0')\n self.output_tensor = graph.get_tensor_by_name('final_result:0')\n\n def convert_to_classifier(self):\n # Save softmax layer\n self.layer_names = []\n self.layers = []\n self.layers.append(self.output_tensor)\n self.layer_names.append('probs')\n\n def fprop(self, x, set_ref=False):\n return dict(zip(self.layer_names, self.layers))\n\n\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n # Load model\n model = InceptionCNNModel()\n model.convert_to_classifier()\n\n # Load faces\n origin_image = read_tensor_from_image_file('./data/yyy.jpg')\n # with open(\"./data/kotori.jpg\", \"rb\") as image_file:\n # origin_image = base64.encodebytes(image_file.read())\n\n # Define target label\n array = np.zeros((1, 100))\n array[0][24] = 1.\n y_target = tf.convert_to_tensor(array, np.float32)\n\n # Craft adversarial examples\n steps = 5\n eps = 0.1\n alpha = eps / steps\n fgsm = FastGradientMethod(model, back='tf', sess=sess)\n fgsm_params = {'eps': alpha,\n 'y_target': y_target,\n 'clip_min': 0.,\n 'clip_max': 1.}\n adv_x = fgsm.generate(model.input_tensor, **fgsm_params)\n\n adv = origin_image\n for i in range(steps):\n print(\"FGSM step \" + str(i + 1))\n adv = sess.run(adv_x, feed_dict={model.input_tensor: adv})\n\n\n # # Craft adversarial examples\n # bis_params = {\n # 'eps': 0.01,\n # 'eps_iter': 3,\n # 'nb_iter': 10\n # }\n # bis = BasicIterativeMethod(model, back='tf', sess=sess)\n # adv_x = bis.generate(model.input_tensor, **bis_params)\n # adv = sess.run(adv_x, feed_dict={model.input_tensor: origin_image})\n\n def resize_and_to_int(image_array):\n resize = image_array.reshape((299, 299, 3)) * 255\n int_img = resize.astype(np.int)\n return int_img\n\n\n scipy.misc.imsave('./data/outfile.jpg', resize_and_to_int(adv))\n scipy.misc.imsave('./data/infile.jpg', resize_and_to_int(origin_image))\n\n label_lines = [line.rstrip() for line\n in tf.gfile.GFile(\"./models/output_labels_2.txt\")]\n predictions = sess.run(model.output_tensor, feed_dict={model.input_tensor: adv})\n # Sort to show labels of first prediction in order of confidence\n top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]\n\n results = []\n for node_id in top_k:\n human_string = 
label_lines[node_id]\n            score = predictions[0][node_id]\n            results.append((human_string, score))\n\n        print(results)\n", "sub_path": "adv_learning.py", "file_name": "adv_learning.py", "file_ext": "py", "file_size_in_byte": 3403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cleverhans.model.Model", "line_number": 13, "usage_type": "name"}, {"api_name": "utils.load_model", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.read_tensor_from_image_file", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 82, "usage_type": "attribute"}, {"api_name": "scipy.misc.misc.imsave", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 86, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 86, "usage_type": "name"}, {"api_name": "scipy.misc.misc.imsave", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 87, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 87, "usage_type": "name"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 90, "usage_type": "attribute"}]}
+{"seq_id": "518664950", "text": "import json\nfrom spellchecker import SpellChecker\nfrom langdetect import detect\nimport sys\nfrom feelingAnalyzer import FeelingAnalyzer\n\n\ndef Mail_ML (mail_recup):\n\n    # print(sys.argv)\n\n    '''\n    Takes as input a JSON with what we collect from the user.\n    Returns a JSON with the scores computed by the machine learning on the probability that the mail is a fake.\n    We take into account:\n    - the spelling mistakes present in the mail\n    '''\n    # parse json:\n    text = json.loads(mail_recup)\n\n\n    # text to correct\n    corps_mail=text[\"text\"]\n\n    '''\n    SPELLING ANALYSIS\n    '''\n\n\n    new_str=\"\"\n    foundForbiddenChar = False\n    for i in range(0, len(corps_mail), 1):\n        # end of corps_mail: always keep the final character\n        if (i+1 == len(corps_mail)):\n            new_str = new_str + corps_mail[i]\n            continue\n\n        if (corps_mail[i+1]!=\"'\" and corps_mail[i+1]!=\",\"):\n            if not foundForbiddenChar :\n                new_str += corps_mail[i]\n            foundForbiddenChar = False\n        else :\n            foundForbiddenChar = True\n\n\n    # print(new_str)\n\n\n    if (detect(corps_mail)=='fr'):\n        spell = SpellChecker(language='fr')\n        # print(spell.correction(\"bojour\"))\n\n        all_words=new_str.split()\n\n        corrected_all_words=[]\n        for i in range (0,len(all_words),1):\n\n            #print(spell.correction(l[i]))\n            corrected_all_words.append(spell.correction(all_words[i]))\n\n    if (detect(corps_mail)=='en'):\n        spell = SpellChecker(language='en')\n        # print(spell.correction(\"bojour\"))\n\n        all_words=new_str.split()\n\n        corrected_all_words=[]\n        for i in range (0,len(all_words),1):\n\n            #print(spell.correction(l[i]))\n            corrected_all_words.append(spell.correction(all_words[i]))\n\n\n    # print (corrected_all_words)\n\n    count_errors=0\n    for i in range (0,len(corrected_all_words),1):\n
        if (corrected_all_words[i]!=all_words[i]):\n            count_errors+=1\n\n\n    # default score when no length/error threshold below matches\n    pourcentage_faute=20\n\n    if(len(corps_mail)<150 and count_errors>6):\n        pourcentage_faute=80\n\n    if(len(corps_mail)<150 and count_errors<=6):\n        pourcentage_faute=20\n\n    if(150<len(corps_mail)<450 and count_errors>10):\n        pourcentage_faute=80\n\n    if(len(corps_mail)>450 and count_errors<=10):\n        pourcentage_faute=20\n\n    if(len(corps_mail)>450 and 10<count_errors<=15):\n        pourcentage_faute=50\n\n    if(len(corps_mail)>450 and count_errors>15):\n        pourcentage_faute=80\n\n\n    #print('{\"Erreur\":'+ str(pourcentage_faute)+\"%\"'}')\n\n\n\n    '''\n    SENTIMENT ANALYSIS (positivity, engaging, alarming)\n    '''\n\n\n    resultPositivity = FeelingAnalyzer.predictMailFeeling_Positivity(\n        verbose= False,\n        mailToAnalyse = corps_mail\n    )\n    #print(resultPositivity)\n    scorePositivity = 15\n    if resultPositivity[0] == \"positive\" : scorePositivity += 35\n    if resultPositivity[1] == \"positive\" : scorePositivity += 35\n    #print(\"scorePositivity = \", scorePositivity)\n\n\n    resultEngaging = FeelingAnalyzer.predictMailFeeling_Engaging(\n        verbose= False,\n        mailToAnalyse = corps_mail\n    )\n    #print(resultEngaging)\n    scoreEngaging = 15\n    if resultEngaging[0] == \"engaging\" : scoreEngaging += 35\n    if resultEngaging[1] == \"engaging\" : scoreEngaging += 35\n    #print(\"scoreEngaging = \", scoreEngaging)\n\n\n    resultAlarming = FeelingAnalyzer.predictMailFeeling_Alarming(\n        verbose= False,\n        mailToAnalyse = corps_mail\n    )\n    #print(resultAlarming)\n    scoreAlarming = 15\n    if resultAlarming[0] == \"alarming\" : scoreAlarming += 35\n    if resultAlarming[1] == \"alarming\" : scoreAlarming += 35\n    #print(\"scoreAlarming = \", scoreAlarming)\n\n    stringResult = {\n        \"scoreOrthographe\": pourcentage_faute,\n        \"scorePositivity\": scorePositivity, \n        \"scoreEngaging\": scoreEngaging,\n        \"scoreAlarming\": scoreAlarming\n    }\n\n    print('{\"orthographe\":'+ str(pourcentage_faute)+',\"positivite\":'+str(scorePositivity)+',\"engagement\":'+str(scoreEngaging)+',\"alarmant\":'+str(scoreAlarming)+'}')\n\n# print(\"-------------------------------------------------BAD MAILS-----------------------------------------------------\")\n\n# print(\"--------------------1-----------------------\")\n# our_mail1='{\"objet\" : \"Support NETFLIX\", \"text\": \"Bonjour, Nous n\'avons pas pu autoriser votre paiement pour le prochain cycle de facturation de votre abonnement. Nous serions bien évidemment très heureux de vous compter à nouveau parmi nous. cliquez simplement sur, réactivez simplement votre abonnement pour profiter des meilleurs films et séries TV sans interruption. RÉACTIVER L\'ABONNEMENT. Nous sommes là pour vous aider. Pour plus d\'informations, consultez le Centre d\'aide ou contactez-nous. 
L\\'équipe Netflix\"}'\nour_mail1=sys.argv[1]\nMail_ML(our_mail1)", "sub_path": "v1/python/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5088, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "langdetect.detect", "line_number": 50, "usage_type": "call"}, {"api_name": "spellchecker.SpellChecker", "line_number": 51, "usage_type": "call"}, {"api_name": "langdetect.detect", "line_number": 62, "usage_type": "call"}, {"api_name": "spellchecker.SpellChecker", "line_number": 63, "usage_type": "call"}, {"api_name": "feelingAnalyzer.FeelingAnalyzer.predictMailFeeling_Positivity", "line_number": 118, "usage_type": "call"}, {"api_name": "feelingAnalyzer.FeelingAnalyzer", "line_number": 118, "usage_type": "name"}, {"api_name": "feelingAnalyzer.FeelingAnalyzer.predictMailFeeling_Engaging", "line_number": 129, "usage_type": "call"}, {"api_name": "feelingAnalyzer.FeelingAnalyzer", "line_number": 129, "usage_type": "name"}, {"api_name": "feelingAnalyzer.FeelingAnalyzer.predictMailFeeling_Alarming", "line_number": 140, "usage_type": "call"}, {"api_name": "feelingAnalyzer.FeelingAnalyzer", "line_number": 140, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 163, "usage_type": "attribute"}]} +{"seq_id": "111356627", "text": "from django.contrib.contenttypes.models import ContentType\nfrom django.test import TestCase, modify_settings, override_settings\n\nfrom glitter.blocks.html.models import HTML\nfrom glitter.models import ContentBlock, Version\nfrom glitter.page import Glitter\nfrom glitter.pages.models import Page\n\n\nclass TestGlitterDefaultBlocks(TestCase):\n def setUp(self):\n self.page = Page.objects.create(url='/test/', title='Test page')\n self.page_version = Version.objects.create(\n content_type=ContentType.objects.get_for_model(Page),\n object_id=self.page.id,\n template_name='glitter/sample.html',\n )\n self.glitter = Glitter(self.page_version)\n\n @override_settings(\n GLITTER_DEFAULT_BLOCKS=None,\n )\n @modify_settings(INSTALLED_APPS={\n 'append': 'glitter.blocks.image',\n })\n def test_default_blocks(self):\n # Standard blocks: Text, Image, HTML\n self.assertEqual(\n self.glitter.default_blocks, [\n ('glitter_redactor.Redactor', 'Text'),\n ('glitter_image.ImageBlock', 'Image'),\n ('glitter_html.HTML', 'HTML'),\n ],\n )\n\n @override_settings(\n GLITTER_DEFAULT_BLOCKS=[('glitter_html.HTML', 'HTML')],\n )\n def test_custom_blocks(self):\n # Custom will return whatever is given to it\n self.assertEqual(self.glitter.default_blocks, [('glitter_html.HTML', 'HTML')])\n\n\nclass TestGlitterQueries(TestCase):\n def setUp(self):\n self.page = Page.objects.create(url='/test/', title='Test page')\n self.page_version = Version.objects.create(\n content_type=ContentType.objects.get_for_model(Page),\n object_id=self.page.id,\n template_name='glitter/sample.html',\n )\n\n def test_html_blocks(self):\n # Add 100 HTML blocks to a page\n html_content_type = ContentType.objects.get_for_model(HTML)\n\n for block_position in range(1, 101):\n html_block = HTML.objects.create(content='

HTML Block

')\n content_block = ContentBlock.objects.create(\n obj_version=self.page_version,\n column='main_content',\n position=block_position,\n content_type=html_content_type,\n object_id=html_block.id,\n )\n html_block.content_block = content_block\n html_block.save(update_fields=['content_block'])\n\n # Two queries:\n # - One to select content blocks, along with their content types\n # - Another to select all HTML blocks\n with self.assertNumQueries(2):\n glitter = Glitter(page_version=self.page_version)\n glitter.render()\n", "sub_path": "glitter/tests/test_glitter.py", "file_name": "test_glitter.py", "file_ext": "py", "file_size_in_byte": 2729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "glitter.pages.models.Page.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "glitter.pages.models.Page.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "glitter.pages.models.Page", "line_number": 12, "usage_type": "name"}, {"api_name": "glitter.models.Version.objects.create", "line_number": 13, "usage_type": "call"}, {"api_name": "glitter.models.Version.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "glitter.models.Version", "line_number": 13, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 14, "usage_type": "call"}, {"api_name": "glitter.pages.models.Page", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 14, "usage_type": "name"}, {"api_name": "glitter.page.Glitter", "line_number": 18, "usage_type": "call"}, {"api_name": "django.test.override_settings", "line_number": 20, "usage_type": "call"}, {"api_name": "django.test.modify_settings", "line_number": 23, "usage_type": "call"}, {"api_name": "django.test.override_settings", "line_number": 36, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 44, "usage_type": "name"}, {"api_name": "glitter.pages.models.Page.objects.create", "line_number": 46, "usage_type": "call"}, {"api_name": "glitter.pages.models.Page.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "glitter.pages.models.Page", "line_number": 46, "usage_type": "name"}, {"api_name": "glitter.models.Version.objects.create", "line_number": 47, "usage_type": "call"}, {"api_name": "glitter.models.Version.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "glitter.models.Version", "line_number": 47, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 48, "usage_type": "call"}, {"api_name": "glitter.pages.models.Page", "line_number": 48, "usage_type": "argument"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 48, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 55, "usage_type": "call"}, {"api_name": "glitter.blocks.html.models.HTML", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 55, 
"usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 55, "usage_type": "name"}, {"api_name": "glitter.blocks.html.models.HTML.objects.create", "line_number": 58, "usage_type": "call"}, {"api_name": "glitter.blocks.html.models.HTML.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "glitter.blocks.html.models.HTML", "line_number": 58, "usage_type": "name"}, {"api_name": "glitter.models.ContentBlock.objects.create", "line_number": 59, "usage_type": "call"}, {"api_name": "glitter.models.ContentBlock.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "glitter.models.ContentBlock", "line_number": 59, "usage_type": "name"}, {"api_name": "glitter.blocks.html.models", "line_number": 73, "usage_type": "name"}, {"api_name": "glitter.page.Glitter", "line_number": 73, "usage_type": "call"}, {"api_name": "glitter.blocks.html.models.render", "line_number": 74, "usage_type": "call"}, {"api_name": "glitter.blocks.html.models", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "539541117", "text": "# ===================================\n#coding=utf-8\n#author='Shichao-Dong'\n#该文件用于编写公共方法,与业务逻辑无法\n# ===================================\n\nfrom appium import webdriver\nimport time\n\ndef mydriver():\n global driver\n desired_caps = {\n 'platformName':'Android',\n 'deviceName':'G40GLD4572500169',\n 'platformVersion':'6.0',\n 'appPackage':'com.fiberhome.waiqin365.client',\n 'appActivity':'com.waiqin365.base.login.LoginSplashActivity',\n 'unicodeKeyboard':True,\n 'resetKeyboard':True\n }\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',desired_caps)\n time.sleep(2)\n a=driver.find_elements_by_class_name('android.widget.Button')\n if a:\n textfields = driver.find_elements_by_class_name(\"android.widget.EditText\")\n textfields[0].send_keys('dongshichao')\n textfields[1].send_keys('dong')\n textfields[2].send_keys('a111111')\n driver.find_element_by_id('com.fiberhome.waiqin365.client:id/btn_login').click()\n time.sleep(2)\n else:\n time.sleep(2)\n\n\n\n\ndef get_window_size():\n '''\n 获取屏幕大小\n :return: windowsize\n '''\n global windowSize\n windowSize = driver.get_window_size()\n return windowSize", "sub_path": "Appium/util/public.py", "file_name": "public.py", "file_ext": "py", "file_size_in_byte": 1338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "appium.webdriver.Remote", "line_number": 21, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 21, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "241772269", "text": "#!/usr/bin/env python3\nimport os\nimport ast\nimport sys\nimport argparse\nimport warnings\nwarnings.simplefilter(\"ignore\", UserWarning)\nimport pandas as pd\nimport app.app as app\nimport tests.tests as test\nfrom app.experiments import combo\n\n\ndef add_args():\n description = 'Medifor reasoning engine for detection and localization.'\n parser = argparse.ArgumentParser(description=description, prog='main')\n\n # high level args\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--run', '-r', action='store_true',\n help='Run reasoning engine, default: %(default)s')\n group.add_argument('--combo_exp', action='store_true',\n help='Combinations Experiment, default: %(default)s')\n 
group.add_argument('--score_local', action='store_true',\n help='Do local mask scoring, default: %(default)s')\n group.add_argument('--unit_tests', action='store_true',\n help='Run unit tests, default: %(default)s')\n group.add_argument('--integration_tests', action='store_true',\n help='Run integration tests, default: %(default)s')\n\n # general args that overlap among different APIs\n parser.add_argument('--algorithms', default='base', metavar='ALGS',\n help='set of indicators, default: %(default)s')\n parser.add_argument('--bins', default=5, metavar='NUM', type=int,\n help='number of feature bins, default: %(default)s')\n parser.add_argument('--bin_percent', action='store_true',\n help='bin using percentages, default: %(default)s')\n parser.add_argument('--cv_folds', default=2, metavar='NUM', type=int,\n help='number of cross-val folds, default: %(default)s')\n parser.add_argument('--data_dir', default='data/', metavar='DIR',\n help='data directory, default: %(default)s')\n parser.add_argument('--eval', default='cc', metavar='SCHEMA',\n help='evaluation method, default: %(default)s')\n parser.add_argument('--explain_correct', action='store_true',\n help='explain correct preds, default: %(default)s')\n parser.add_argument('--explain_features', action='store_true',\n help='explain features, default: %(default)s')\n parser.add_argument('--explain_preds', default=0, metavar='NUM', type=int,\n help='preds to explain, default: %(default)s')\n parser.add_argument('--include_og_data', action='store_true',\n help='use og data for stacking, default: %(default)s')\n parser.add_argument('--learner', default='lr', metavar='MODEL',\n help='classifier, default: %(default)s')\n parser.add_argument('--metrics', nargs='*', metavar='METRIC',\n help='list of local metrics, default: %(default)s')\n parser.add_argument('--normalization', default='minmax', metavar='TYPE',\n help='normalization method, default: %(default)s')\n parser.add_argument('--param_search', default='low', metavar='LEVEL',\n help='parameter search, default: %(default)s')\n parser.add_argument('--sublearners', nargs='*', metavar='MODEL',\n help='level-0 learners, default: %(default)s')\n parser.add_argument('--sub_dir', metavar='DIR', type=str, default='',\n help='submission directory, default: %(default)s')\n parser.add_argument('--task', default='global', metavar='TASK',\n help='detection task, default: %(default)s')\n parser.add_argument('--test_sets', default='3', metavar='DSET',\n help='test datasets, default: %(default)s')\n parser.add_argument('--train_sets', default='2', metavar='DSET',\n help='training datasets, default: %(default)s')\n parser.add_argument('--tune_folds', default=5, metavar='NUM', type=int,\n help='number of tuning folds, default: %(default)s')\n parser.add_argument('--use_image_quality', action='store_true',\n help='use image quality feature, default: %(default)s')\n parser.add_argument('--val_sets', default=None, metavar='DSET',\n help='validation datasets, default: %(default)s')\n parser.add_argument('--verbose', default=0, metavar='LEVEL', type=int,\n help='verbosity level, default: %(default)s')\n\n # experiment specific args\n parser.add_argument('--learners', nargs='*', metavar='MODEL',\n help='list of learners, default: %(default)s')\n parser.add_argument('--alg_list', nargs='*', metavar='ALGS',\n help='list of indicator sets, default: %(default)s')\n parser.add_argument('--dset_list', nargs='*', metavar='DSET',\n default=['2,None,3'],\n help='list of dset tuples, default: %(default)s')\n\n return 
parser\n\n\ndef parse_args(parser):\n p = {}\n a = parser.parse_args()\n\n p['algorithms'] = a.algorithms\n p['alg_list'] = a.alg_list if a.alg_list is not None else ['base']\n p['bins'] = a.bins\n p['bin_percent'] = a.bin_percent\n p['cross_val_folds'] = a.cv_folds\n p['dset_list'] = [ast.literal_eval(s) for s in a.dset_list]\n p['data_dir'] = a.data_dir\n p['eval'] = a.eval\n p['explain_correct'] = a.explain_correct\n p['explain_features'] = a.explain_features\n p['explain_preds'] = a.explain_preds\n p['include_og_data'] = a.include_og_data\n p['learner'] = a.learner\n p['learners'] = a.learners if a.learners is not None else ['lr']\n p['metrics'] = a.metrics if a.metrics is not None else ['logloss', 'mse']\n p['normalization'] = a.normalization\n p['param_search'] = a.param_search\n p['sublearners'] = a.sublearners if a.sublearners is not None else []\n p['sub_dir'] = a.sub_dir\n p['task'] = a.task\n p['test_sets'] = a.test_sets\n p['train_sets'] = a.train_sets\n p['tune_folds'] = a.tune_folds\n p['use_image_quality'] = a.use_image_quality\n p['val_sets'] = a.val_sets\n p['verbose'] = a.verbose\n\n return a, p\n\n\ndef global_settings(max_columns=100):\n \"\"\"\n Sets application settings, mainly pertaining to pandas viewing.\n\n Parameters\n ----------\n max_columns : int, default: 100\n Maximum number of columns to display when viewing a pandas dataframe.\n \"\"\"\n os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # after updating xgb\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ignores keras CPU improvements\n warnings.simplefilter(action='ignore', category=FutureWarning)\n warnings.simplefilter(action='ignore', category=UserWarning)\n warnings.simplefilter(action='ignore', category=DeprecationWarning)\n\n pd.options.mode.chained_assignment = None\n if os.isatty(sys.stdin.fileno()):\n rows, columns = os.popen('stty size', 'r').read().split()\n pd.set_option('display.width', int(columns))\n pd.set_option('display.max_columns', max_columns)\n\n\ndef main():\n global_settings()\n parser = add_args()\n args, p = parse_args(parser)\n\n if args.run:\n app.run(model_type=p['learner'], train_sets=p['train_sets'],\n val_sets=p['val_sets'], test_sets=p['test_sets'],\n use_image_quality=p['use_image_quality'],\n algorithms=p['algorithms'], gs_folds=p['tune_folds'],\n cv_folds=p['cross_val_folds'], evaluation=p['eval'],\n normalization=p['normalization'], task=p['task'],\n verbose=p['verbose'], param_search=p['param_search'],\n sublearners=p['sublearners'],\n include_og_data=p['include_og_data'],\n bins=p['bins'], percentage_binning=p['bin_percent'],\n explain_preds=p['explain_preds'],\n explain_correct=p['explain_correct'],\n explain_features=p['explain_features'],\n data_dir=p['data_dir'])\n\n elif args.combo_exp:\n combo.combo_exp(dsets=p['dset_list'], model_types=p['learners'],\n param_search=p['param_search'], img_qualities=[True],\n algorithms=p['alg_list'], bins=p['bins'],\n fname='combo_new.csv', verbose=p['verbose'],\n data_dir=p['data_dir'], task=p['task'])\n\n elif args.score_local:\n app.score_local(sub_dir=p['sub_dir'], test_sets=p['test_sets'],\n metrics=p['metrics'], data_dir=p['data_dir'])\n\n elif args.unit_tests:\n test.run_unit_tests()\n\n elif args.integration_tests:\n test.run_integration_tests()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "warnings.simplefilter", 
"line_number": 7, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 100, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 135, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 136, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 137, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.options", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.isatty", "line_number": 141, "usage_type": "call"}, {"api_name": "sys.stdin.fileno", "line_number": 141, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.popen", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 144, "usage_type": "call"}, {"api_name": "app.app.run", "line_number": 153, "usage_type": "call"}, {"api_name": "app.app", "line_number": 153, "usage_type": "name"}, {"api_name": "app.experiments.combo.combo_exp", "line_number": 169, "usage_type": "call"}, {"api_name": "app.experiments.combo", "line_number": 169, "usage_type": "name"}, {"api_name": "app.app.score_local", "line_number": 176, "usage_type": "call"}, {"api_name": "app.app", "line_number": 176, "usage_type": "name"}, {"api_name": "tests.tests.run_unit_tests", "line_number": 180, "usage_type": "call"}, {"api_name": "tests.tests", "line_number": 180, "usage_type": "name"}, {"api_name": "tests.tests.run_integration_tests", "line_number": 183, "usage_type": "call"}, {"api_name": "tests.tests", "line_number": 183, "usage_type": "name"}]} +{"seq_id": "628295485", "text": "from django.db import models\nfrom django.db import transaction\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom apps.common.functions import require_lock\n\nimport logging\nlogger = logging.getLogger(__name__)\nfrom apps.common.logger import extra, started, finished, incompleted\n\nfrom datetime import datetime\nfrom project.settings import DEFAULT_FAILURE_MESSAGE\nfailure = {'code': 500, 'description': DEFAULT_FAILURE_MESSAGE}\n\nfrom apps.tmt.companies.models import Companies\nfrom apps.tmt.auditlogs.models import Auditlogs\nfrom apps.tmt.environments.models import Environments\n\n\nclass EnvironmentsvariablesObjectsManager(models.Manager):\n def Create(self, environment_id, variables, **kwargs):\n logger.info(started, **extra(kwargs))\n\n try:\n environmentsvariables_object = Environmentsvariables(\n company=kwargs.get('company_object'),\n environment_id=environment_id,\n variables=variables)\n\n environmentsvariables_object.save()\n\n Auditlogs.objects.Create(\n date=datetime.utcnow(),\n action='create',\n item='environmentvariables',\n item_id=environmentsvariables_object.id,\n new_value=None, # version here\n **kwargs)\n\n logger.info(finished, **extra(kwargs))\n return {'code': 200,\n 'environmentsvariables_object': environmentsvariables_object}\n\n except Exception as why:\n logger.critical(incompleted + str(why), **extra(kwargs))\n return failure\n\n def Read(self, environment_id, **kwargs):\n logger.info(started, **extra(kwargs))\n\n company = kwargs.get('company_object')\n\n try:\n environmentsvariables_object = Environmentsvariables.objects.get(company=company,\n 
environment_id=environment_id)\n\n logger.info(finished, **extra(kwargs))\n return {'code': 200, 'environmentsvariables_object': environmentsvariables_object}\n\n except ObjectDoesNotExist as why:\n return {'code': 200}\n\n except Exception as why:\n params = ' environment_id: %s' % (environment_id)\n logger.critical(incompleted + str(why) + params, **extra(kwargs))\n return {'failure': DEFAULT_FAILURE_MESSAGE}\n\n def Update(self, environment_id, variables, **kwargs):\n logger.info(started, **extra(kwargs))\n\n environmentsvariables = Environmentsvariables.objects.Read(environment_id, **kwargs)\n if environmentsvariables.get('code') != 200:\n logger.info(incompleted, **extra(kwargs))\n return environmentsvariables\n\n environmentsvariables_object = environmentsvariables.get('environmentsvariables_object')\n\n try:\n environmentsvariables_object.variables = variables\n environmentsvariables_object.save()\n\n Auditlogs.objects.Create(\n date=datetime.utcnow(),\n action='update',\n item='environment',\n item_id=environmentsvariables_object.id,\n new_value=variables,\n **kwargs)\n\n logger.info(finished, **extra(kwargs))\n return {'code': 200, 'environmentsvariables_object': environmentsvariables_object}\n\n except Exception as why:\n logger.critical(incompleted + str(why), **extra(kwargs))\n return failure\n\n\nclass Environmentsvariables(models.Model):\n related_name = 'environmentsvariables_fk_to_companies'\n company = models.ForeignKey(Companies, related_name=related_name)\n\n related_name = 'environmentsvariables_fk_to_environments'\n environment = models.ForeignKey(Environments, related_name=related_name)\n\n variables = models.TextField(null=False)\n\n objects = EnvironmentsvariablesObjectsManager()\n\n def __unicode__(self):\n return self.id\n\n class Meta:\n db_table = \"environmentsvariables\"\n unique_together = ((\"company\", \"environment\"))\n ordering = ('-id',)\n", "sub_path": "apps/tmt/environmentsvariables/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "project.settings.DEFAULT_FAILURE_MESSAGE", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "apps.common.logger.started", "line_number": 22, "usage_type": "argument"}, {"api_name": "apps.common.logger.extra", "line_number": 22, "usage_type": "call"}, {"api_name": "apps.tmt.auditlogs.models.Auditlogs.objects.Create", "line_number": 32, "usage_type": "call"}, {"api_name": "apps.tmt.auditlogs.models.Auditlogs.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "apps.tmt.auditlogs.models.Auditlogs", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "apps.common.logger.finished", "line_number": 40, "usage_type": "argument"}, {"api_name": "apps.common.logger.extra", "line_number": 40, "usage_type": "call"}, {"api_name": "apps.common.logger.incompleted", "line_number": 45, "usage_type": "name"}, {"api_name": "apps.common.logger.extra", "line_number": 45, "usage_type": "call"}, {"api_name": "apps.common.logger.started", "line_number": 49, "usage_type": 
"argument"}, {"api_name": "apps.common.logger.extra", "line_number": 49, "usage_type": "call"}, {"api_name": "apps.common.logger.finished", "line_number": 57, "usage_type": "argument"}, {"api_name": "apps.common.logger.extra", "line_number": 57, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 60, "usage_type": "name"}, {"api_name": "apps.common.logger.incompleted", "line_number": 65, "usage_type": "name"}, {"api_name": "apps.common.logger.extra", "line_number": 65, "usage_type": "call"}, {"api_name": "project.settings.DEFAULT_FAILURE_MESSAGE", "line_number": 66, "usage_type": "name"}, {"api_name": "apps.common.logger.started", "line_number": 69, "usage_type": "argument"}, {"api_name": "apps.common.logger.extra", "line_number": 69, "usage_type": "call"}, {"api_name": "apps.common.logger.incompleted", "line_number": 73, "usage_type": "argument"}, {"api_name": "apps.common.logger.extra", "line_number": 73, "usage_type": "call"}, {"api_name": "apps.tmt.auditlogs.models.Auditlogs.objects.Create", "line_number": 82, "usage_type": "call"}, {"api_name": "apps.tmt.auditlogs.models.Auditlogs.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "apps.tmt.auditlogs.models.Auditlogs", "line_number": 82, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "apps.common.logger.finished", "line_number": 90, "usage_type": "argument"}, {"api_name": "apps.common.logger.extra", "line_number": 90, "usage_type": "call"}, {"api_name": "apps.common.logger.incompleted", "line_number": 94, "usage_type": "name"}, {"api_name": "apps.common.logger.extra", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 98, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 100, "usage_type": "call"}, {"api_name": "apps.tmt.companies.models.Companies", "line_number": 100, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 103, "usage_type": "call"}, {"api_name": "apps.tmt.environments.models.Environments", "line_number": 103, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 103, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "243432307", "text": "import torch\nimport torchvision.transforms as T\nfrom torch.utils.data import DataLoader\n\n\nfrom .Market1501 import Market1501\nfrom .bases import ImageDataset\nfrom .preprocessing import RandomErasing\nfrom .sampler import RandomIdentitySampler\n\nfrom PIL import Image\nimport numpy as np\n\n\ndef train_collate_fn(batch):\n imgs, pids, _, _, = zip(*batch)\n pids = torch.tensor(pids, dtype=torch.int64)\n return torch.stack(imgs, dim=0), pids\n#collate_fn这个函数的输入就是一个list,list的长度是一个batch size,list中的每个元素都是__getitem__得到的结果\n\ndef val_collate_fn(batch):\n imgs, pids, camids, _ = zip(*batch)\n return torch.stack(imgs, dim=0), pids, camids\n\n\ndef make_dataloader(Cfg):\n train_transforms = T.Compose([\n T.Resize(Cfg.INPUT_SIZE),\n T.RandomHorizontalFlip(p=0.5),\n T.Pad(10),\n T.RandomCrop(Cfg.INPUT_SIZE),\n #T.RandomRotation(10, resample=Image.BICUBIC, 
expand=False, center=None),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n RandomErasing(probability=0.5, sh=0.4, mean=(0.4914, 0.4822, 0.4465))\n ])\n\n val_transforms = T.Compose([\n T.Resize(Cfg.INPUT_SIZE),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n num_workers = Cfg.DATALOADER_NUM_WORKERS\n dataset = Market1501(data_dir = Cfg.DATA_DIR, verbose = True)\n num_classes = dataset.num_train_pids\n\n train_set = ImageDataset(dataset.train, train_transforms)\n\n if Cfg.SAMPLER == 'softmax':\n train_loader = DataLoader(train_set,\n batch_size = Cfg.BATCHSIZE,\n shuffle = False,\n num_workers = num_workers,\n sampler = RandomIdentitySampler(dataset.train, Cfg.BATCHSIZE, Cfg.NUM_IMG_PER_ID),\n collate_fn = train_collate_fn, #customized batch sampler\n drop_last = True\n )\n else:\n print('unsupported sampler! expected softmax but got {}'.format(Cfg.SAMPLER))\n\n val_set = ImageDataset(dataset.query + dataset.gallery, val_transforms)\n val_loader = DataLoader(val_set,\n batch_size=Cfg.TEST_IMS_PER_BATCH,\n shuffle=False, num_workers=num_workers,\n collate_fn=val_collate_fn\n )\n return train_loader, val_loader, len(dataset.query), num_classes", "sub_path": "datasets/make_dataloader.py", "file_name": "make_dataloader.py", "file_ext": "py", "file_size_in_byte": 2346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.tensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 28, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 28, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.Pad", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "preprocessing.RandomErasing", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 39, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 40, 
"usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "Market1501.Market1501", "line_number": 45, "usage_type": "call"}, {"api_name": "bases.ImageDataset", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "sampler.RandomIdentitySampler", "line_number": 55, "usage_type": "call"}, {"api_name": "bases.ImageDataset", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "583214626", "text": "import onnx\nimport onnx.utils\nfrom onnx import optimizer\nimport sys\nimport argparse\n\nfrom tools import eliminating\nfrom tools import fusing\nfrom tools import replacing\nfrom tools import other\nfrom tools import special\nfrom tools import combo\n# from tools import temp\n\n# Main process\n# Argument parser\nparser = argparse.ArgumentParser(description=\"Optimize an ONNX model for Kneron compiler\")\nparser.add_argument('in_file', help='input ONNX FILE')\nparser.add_argument('-o', '--output', dest='out_file', type=str, help=\"ouput ONNX FILE\")\nparser.add_argument('--bgr', action='store_true', default=False, help=\"set if the model is trained in BGR mode\")\nparser.add_argument('--norm', action='store_true', default=False, help=\"set if you have the input -0.5~0.5\")\nparser.add_argument('--rgba2yynn', action='store_true', default=False, help=\"set if the model has yynn input but you want to take rgba images\")\nparser.add_argument('--split-convtranspose', dest='split_convtranspose', action='store_true', default=False,\n help=\"set if you want to split ConvTranspose into Conv and special Upsample\")\nparser.add_argument('--add-bn-on-skip', dest='bn_on_skip', action='store_true', default=False,\n help=\"set if you only want to add BN on skip branches\")\nparser.add_argument('--add-bn', dest='bn_before_add', action='store_true', default=False,\n help=\"set if you want to add BN before Add\")\nparser.add_argument('-t', '--eliminate-tail-unsupported', dest='eliminate_tail', action='store_true', default=False,\n help='whether remove the last unsupported node for hardware')\nparser.add_argument('--no-bn-fusion', dest='disable_fuse_bn', action='store_true', default=False,\n help=\"set if you have met errors which related to inferenced shape mismatch. 
This option will prevent fusing BatchNormailization into Conv.\")\n\nargs = parser.parse_args()\n\nif args.out_file is None:\n outfile = args.in_file[:-5] + \"_polished.onnx\"\nelse:\n outfile = args.out_file\n\n# onnx Polish model includes:\n# -- nop\n# -- eliminate_identity\n# -- eliminate_nop_transpose\n# -- eliminate_nop_pad\n# -- eliminate_unused_initializer\n# -- fuse_consecutive_squeezes\n# -- fuse_consecutive_transposes\n# -- fuse_add_bias_into_conv\n# -- fuse_transpose_into_gemm\n\n# Basic model organize\nm = onnx.load(args.in_file)\n# temp.weight_broadcast(m.graph)\nm = combo.preprocess(m, args.disable_fuse_bn)\n# temp.fuse_bias_in_consecutive_1x1_conv(m.graph)\n\n# Add BN on skip branch\nif args.bn_on_skip:\n other.add_bn_on_skip_branch(m.graph)\nelif args.bn_before_add:\n other.add_bn_before_add(m.graph)\n other.add_bn_before_activation(m.graph)\n# Split deconv\nif args.split_convtranspose:\n other.split_ConvTranspose(m)\n\n# My optimization\nm = combo.common_optimization(m)\n# Special options\nif args.bgr:\n special.change_input_from_bgr_to_rgb(m)\nif args.norm:\n special.add_0_5_to_normalized_input(m)\nif args.rgba2yynn:\n special.add_rgb2yynn_node(m)\n\n# Remove useless last node\nif args.eliminate_tail:\n eliminating.remove_useless_last_nodes(m.graph)\n\n# Postprocessing\nm = combo.postprocess(m)\nonnx.save(m, outfile)\n", "sub_path": "optimizer_scripts/onnx2onnx.py", "file_name": "onnx2onnx.py", "file_ext": "py", "file_size_in_byte": 3188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "onnx.load", "line_number": 53, "usage_type": "call"}, {"api_name": "tools.combo.preprocess", "line_number": 55, "usage_type": "call"}, {"api_name": "tools.combo", "line_number": 55, "usage_type": "name"}, {"api_name": "tools.other.add_bn_on_skip_branch", "line_number": 60, "usage_type": "call"}, {"api_name": "tools.other", "line_number": 60, "usage_type": "name"}, {"api_name": "tools.other.add_bn_before_add", "line_number": 62, "usage_type": "call"}, {"api_name": "tools.other", "line_number": 62, "usage_type": "name"}, {"api_name": "tools.other.add_bn_before_activation", "line_number": 63, "usage_type": "call"}, {"api_name": "tools.other", "line_number": 63, "usage_type": "name"}, {"api_name": "tools.other.split_ConvTranspose", "line_number": 66, "usage_type": "call"}, {"api_name": "tools.other", "line_number": 66, "usage_type": "name"}, {"api_name": "tools.combo.common_optimization", "line_number": 69, "usage_type": "call"}, {"api_name": "tools.combo", "line_number": 69, "usage_type": "name"}, {"api_name": "tools.special.change_input_from_bgr_to_rgb", "line_number": 72, "usage_type": "call"}, {"api_name": "tools.special", "line_number": 72, "usage_type": "name"}, {"api_name": "tools.special.add_0_5_to_normalized_input", "line_number": 74, "usage_type": "call"}, {"api_name": "tools.special", "line_number": 74, "usage_type": "name"}, {"api_name": "tools.special.add_rgb2yynn_node", "line_number": 76, "usage_type": "call"}, {"api_name": "tools.special", "line_number": 76, "usage_type": "name"}, {"api_name": "tools.eliminating.remove_useless_last_nodes", "line_number": 80, "usage_type": "call"}, {"api_name": "tools.eliminating", "line_number": 80, "usage_type": "name"}, {"api_name": "tools.combo.postprocess", "line_number": 83, "usage_type": "call"}, {"api_name": "tools.combo", "line_number": 83, "usage_type": "name"}, {"api_name": 
"onnx.save", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "360425517", "text": "import collections\nimport xml.etree.ElementTree as etree\n\nMSG_STATS = \"Топ-{0} слов в новостях:\"\nFMT_STAT = \"{0}: {1}\"\n\nSTAT_MINLEN = 6\nSTAT_TOPCOUNT = 10\nSTAT_NEWSFILE = \"data/newsafr.xml\"\n\n\ndef news_source(newspath):\n with open(newspath, \"r\", encoding=\"utf-8-sig\") as newsfile:\n news_root = etree.parse(newsfile).getroot()\n return (d.text for d in news_root.findall(\"channel/item/description\"))\n\n\ndef wordstat(newspath, top_num, minlen):\n word_counter = collections.Counter()\n for news_descr in news_source(newspath):\n stat_words = (w.lower() for w in news_descr.split() if len(w) >= minlen)\n word_counter.update(stat_words)\n return word_counter.most_common(top_num)\n\n\nif __name__ == \"__main__\":\n print(MSG_STATS.format(STAT_TOPCOUNT))\n for st_word, st_count in wordstat(STAT_NEWSFILE, STAT_TOPCOUNT, STAT_MINLEN):\n print(FMT_STAT.format(st_word, st_count))\n", "sub_path": "xml_stat.py", "file_name": "xml_stat.py", "file_ext": "py", "file_size_in_byte": 925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 14, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 14, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "326131398", "text": "# -*- coding:utf-8 -*-\n#\n# Copyright @ 2019 OPS Inc.\n#\n# Author: Jinlong Yang\n#\n\nimport sqlalchemy.types\nfrom osmo.db import BASE\nfrom sqlalchemy import (\n Column,\n Boolean,\n Integer,\n String,\n DateTime,\n ForeignKey\n)\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.dialects.postgresql import JSON\n\n\nclass LTree(sqlalchemy.types.UserDefinedType):\n\n def python_type(self):\n return basestring\n\n def get_col_spec(self):\n return 'LTREE'\n\n\nclass Tpl(BASE):\n\n __tablename__ = 'tb_tpl'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(20), nullable=False, default='default')\n alias = Column(String(100), nullable=False)\n create_at = Column(DateTime, server_default=func.now())\n\n nodes = relationship('Node', back_populates='tpl')\n keys = relationship('Key', back_populates='tpl')\n\n def __repr__(self):\n return self.name\n\n\nclass Node(BASE):\n\n __tablename__ = 'tb_node'\n\n id = Column(Integer, primary_key=True)\n tpl_id = Column(Integer, ForeignKey('tb_tpl.id'), nullable=False)\n node = Column(LTree(), nullable=False, unique=True)\n name = Column(String(100), nullable=False)\n leaf = Column(Boolean, default=True)\n metainfo = Column(JSON)\n op = Column(String(30))\n rd = Column(String(30))\n create_at = Column(DateTime, server_default=func.now())\n update_at = Column(DateTime, onupdate=func.now())\n\n tpl = relationship('Tpl', back_populates='nodes')\n instances = relationship('Instance', back_populates='node')\n\n def __repr__(self):\n return self.node\n\n\nclass Instance(BASE):\n\n __tablename__ = 'tb_instance'\n\n id = Column(Integer, primary_key=True)\n node_id = Column(Integer, ForeignKey('tb_node.id'), nullable=False)\n ip = Column(String(20), nullable=False)\n hostname = Column(String(50), nullable=False)\n active = Column(Boolean, default=False)\n create_at = Column(DateTime, server_default=func.now())\n update_at = Column(DateTime, onupdate=func.now())\n\n node = relationship('Node', back_populates='instances')\n vals = relationship('Val', 
back_populates='instance')\n\n def __repr__(self):\n return self.ip\n\n\nclass Key(BASE):\n\n __tablename__ = 'tb_key'\n\n id = Column(Integer, primary_key=True)\n tpl_id = Column(Integer, ForeignKey('tb_tpl.id'), nullable=False)\n key = Column(String(50), nullable=False)\n create_at = Column(DateTime, server_default=func.now())\n\n tpl = relationship('Tpl', back_populates='keys')\n vals = relationship('Val', back_populates='key')\n\n def __repr__(self):\n return self.key\n\n\nclass Val(BASE):\n\n __tablename__ = 'tb_val'\n\n id = Column(Integer, primary_key=True)\n key_id = Column(Integer, ForeignKey('tb_key.id'), nullable=False)\n instance_id = Column(Integer, ForeignKey('tb_instance.id'), nullable=False)\n value = Column(String(100), nullable=False)\n create_at = Column(DateTime, server_default=func.now())\n\n key = relationship('Key', back_populates='vals')\n instance = relationship('Instance', back_populates='vals')\n\n def __repr__(self):\n return self.value\n", "sub_path": "stree/db/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 3149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sqlalchemy.types.types", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 23, "usage_type": "name"}, {"api_name": "osmo.db.BASE", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 36, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 39, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 42, "usage_type": "call"}, {"api_name": "osmo.db.BASE", "line_number": 48, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 52, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 53, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 56, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.JSON", "line_number": 57, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 58, "usage_type": "call"}, 
{"api_name": "sqlalchemy.Column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 60, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 60, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 61, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 61, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 63, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 64, "usage_type": "call"}, {"api_name": "osmo.db.BASE", "line_number": 70, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 74, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 74, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 75, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 76, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 76, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 78, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 78, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 79, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 79, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 80, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 80, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 80, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 80, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 82, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 83, "usage_type": "call"}, {"api_name": "osmo.db.BASE", "line_number": 89, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 93, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 93, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 94, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 94, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 94, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 95, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 95, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 96, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 96, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 96, "usage_type": "call"}, 
{"api_name": "sqlalchemy.sql.func", "line_number": 96, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 98, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 99, "usage_type": "call"}, {"api_name": "osmo.db.BASE", "line_number": 105, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 109, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 109, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 110, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 110, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 110, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 111, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 111, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 111, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 112, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 112, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 113, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 113, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 113, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 113, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 115, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "147009234", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 20 12:15:21 2016\n\n@author: DanielM\n\"\"\"\n\nimport elephant.spike_train_generation as stg\nimport elephant.statistics as stat\nimport matplotlib.pyplot as plt #For plotting/debugging\nimport quantities as pq\nfrom neo.io import AxonIO\n\ndef analyse_AHP(spike_train_list, ch_signal = 0, decay_interval = 12 * pq.ms, \n outlier_correction = True):\n \"\"\"Analyses the AHP of all sweeps in the block by iterating .segments and\n returns a list with the same length as .segments containing the arrays\n that represent each spikes AHP. 
\"\"\"\n\n result_list = []\n \n for x in spike_train_list:\n result_list.append(stat.AHP_slope(x, decay_interval = decay_interval))\n\n if outlier_correction == True:\n for idx, x in enumerate(result_list):\n result_list[idx] = x[(x < (x.mean() + 5 * x.std())) * (x > (x.mean() - 5 * x.std()))]\n return result_list\n \ndef analyse_clustering(spike_train_list, ch_signal = 0, thr_isi = 60 * pq.ms):\n \"\"\"Analyses the clustering fraction of all sweeps in block by iterating .segments\n and returns a list of the clustering fractions of each sweep.\"\"\"\n \n result_list = []\n \n for x in spike_train_list:\n result_list.append(stat.clustering_fraction(x))\n \n return result_list\n\ndef analyse_spike_time(spike_train_list, normalization = 'on', output = 'mean'):\n \n result_list = []\n \n for x in spike_train_list:\n result_list.append(stat.spike_time(x,\n normalization = normalization,\n output = output))\n \n return result_list\n\ndef get_spike_train_list(block, threshold = 0.0 * pq.mV, sign = 'above',\n spike_train = None, extr_interval = (-2 * pq.ms, 4 * pq.ms),\n ch_signal = 0):\n \"\"\"Returns a list of spike trains extracted from block.segments.\"\"\"\n \n result_list = []\n \n for idx,x in enumerate(block.segments):\n result_list.append(stg.spike_extraction(x.analogsignals[ch_signal],\n threshold = threshold,\n sign = sign, \n spike_train = spike_train,\n extr_interval = extr_interval))\n return result_list\n\ndef analyse_half_width(spike_train_list, output = 'mean'):\n \n result_list = []\n \n for x in spike_train_list:\n result_list.append(stat.half_width(x, output = 'output'))\n \n return result_list\n\ndef analyse_mean_firing_rate(spike_train_list):\n \n result_list = []\n\n for x in spike_train_list:\n result_list.append(stat.mean_firing_rate(x))\n \n return result_list\n \ndef quick_load(path):\n r = AxonIO(path)\n myFile = r.read_block()\n return myFile\n\nif __name__ == '__main__':\n \n parent_folder = \"Y:\\DanielM\\Voltage_clamp_of_hilar_cells_and_bipolar_SL_stim_2016\"\n \n animal_folder = \"2016_04_14_ID0944\"\n \n slice_folder = \"S1\"\n\n hyp_steps_file = \"2016_04_14_0000.abf\"\n\n dep_steps_file = \"2016_04_14_0001.abf\"\n\n full_path_dep = parent_folder + \"\\\\\" + animal_folder + \"\\\\\" + slice_folder + \"\\\\\" + dep_steps_file\n\n fileBlock = quick_load(full_path_dep)\n spike_train_list = get_spike_train_list(fileBlock)\n spike_train_list_AHP = get_spike_train_list(fileBlock,\n extr_interval = (4 * pq.ms, 14 * pq.ms))\n \n #AHP_list = analyse_AHP(spike_train_list_AHP, outlier_correction = False)\n \n #clustering_list = analyse_clustering(spike_train_list)\n \n #spike_time_list = analyse_spike_time(spike_train_list, output = 'array')\n \n #half_width_list = analyse_half_width(spike_train_list, output = 'array')\n \n #firing_rate_list = analyse_mean_firing_rate(spike_train_list)", "sub_path": "IntrinsicPropertiesHC.py", "file_name": "IntrinsicPropertiesHC.py", "file_ext": "py", "file_size_in_byte": 3969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "quantities.ms", "line_number": 14, "usage_type": "attribute"}, {"api_name": "elephant.statistics.AHP_slope", "line_number": 23, "usage_type": "call"}, {"api_name": "elephant.statistics", "line_number": 23, "usage_type": "name"}, {"api_name": "quantities.ms", "line_number": 30, "usage_type": "attribute"}, {"api_name": "elephant.statistics.clustering_fraction", "line_number": 37, "usage_type": "call"}, {"api_name": "elephant.statistics", 
"line_number": 37, "usage_type": "name"}, {"api_name": "elephant.statistics.spike_time", "line_number": 46, "usage_type": "call"}, {"api_name": "elephant.statistics", "line_number": 46, "usage_type": "name"}, {"api_name": "quantities.mV", "line_number": 52, "usage_type": "attribute"}, {"api_name": "quantities.ms", "line_number": 53, "usage_type": "attribute"}, {"api_name": "elephant.spike_train_generation.spike_extraction", "line_number": 60, "usage_type": "call"}, {"api_name": "elephant.spike_train_generation", "line_number": 60, "usage_type": "name"}, {"api_name": "elephant.statistics.half_width", "line_number": 72, "usage_type": "call"}, {"api_name": "elephant.statistics", "line_number": 72, "usage_type": "name"}, {"api_name": "elephant.statistics.mean_firing_rate", "line_number": 81, "usage_type": "call"}, {"api_name": "elephant.statistics", "line_number": 81, "usage_type": "name"}, {"api_name": "neo.io.AxonIO", "line_number": 86, "usage_type": "call"}, {"api_name": "quantities.ms", "line_number": 107, "usage_type": "attribute"}]} +{"seq_id": "2737264", "text": "# -*- coding: utf-8 -*-\n\nimport cv2\nimport sys\nimport numpy as np\n\ndef scale_image(infile, size=(48, 48), outfile=None):\n # read image\n img = cv2.imread(infile)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.bitwise_not(img)\n\n h, w = img.shape[:2]\n sh, sw = size\n\n # aspect ratio of image\n aspect = w/h\n\n # padding\n pad = [0, 0, 0, 0] # (top, left, bottom, right)\n\n new_h, new_w = sh, sw\n\n # compute scaling and pad sizing\n if aspect > 1: # horizontal image\n new_w = sw\n new_h = np.round(new_w/aspect).astype(int)\n pad_vert = (sh-new_h)/2\n pad[0] = np.floor(pad_vert).astype(int)\n pad[2] = np.ceil(pad_vert).astype(int)\n\n elif aspect < 1: # vertical image\n new_h = sh\n new_w = np.round(new_h*aspect).astype(int)\n pad_horz = (sw-new_w)/2\n pad[1] = np.floor(pad_horz).astype(int)\n pad[3] = np.ceil(pad_horz).astype(int)\n\n # scale and pad\n img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)\n img = cv2.copyMakeBorder(img, pad[0], pad[2], pad[1], pad[3], borderType=cv2.BORDER_CONSTANT, value=0)\n\n # increase contrast\n img[img > 0] = 255\n\n # display or save as npy\n if not outfile:\n cv2.imshow('scaled image', img)\n cv2.waitKey(0)\n else:\n np.save(outfile, img)\n\nif __name__ == '__main__':\n scale_image(infile=sys.argv[1], outfile='sample.npy')\n", "sub_path": "magenta/magenta/models/sketch_rnn/preprocessing/preprocess_3_npys.py", "file_name": "preprocess_3_npys.py", "file_ext": "py", "file_size_in_byte": 1426, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_not", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 40, "usage_type": "attribute"}, {"api_name": 
"cv2.copyMakeBorder", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}]} +{"seq_id": "206128324", "text": "#!/usr/bin/env python\n\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom builtins import open\nfrom future import standard_library\nstandard_library.install_aliases()\nimport numpy as np \nfrom utils.plotting.standard_modules import *\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nif __name__=='__main__':\n\n\timport argparse\n\timport pickle\n\timport scipy.stats as scp\n\n\tparser = argparse.ArgumentParser('plot TS and fitted value distributions for all modified poissonian llhs')\n\n\tparser.add_argument('-is','--input-stem',help='input pickle file stem name',default = 'pseudo_exp_llh')\n\tparser.add_argument('-l','--list',help='list of llh functions you want to plot',nargs='+')\n\tparser.add_argument('-o','--output',help='output pdf file',default='test.pdf')\n\n\targs= parser.parse_args()\n\n\toutpdf = PdfPages(args.output)\n\n\tcoverage_fig = Figure(figsize=(10,10))\n\n\tsample_chi2_distrib = np.random.chisquare(size=100,df=1)\n\tbinning = np.linspace(0,20,31)\n\n\n\tfor llh_name in args.list:\n\t\tprint(('plotting ',llh_name))\n\n\t\tassert llh_name in ['modchi2','SAY','dima','glu2','barlow'],'ERROR: Available likelihood functions are: glu2 modchi2 dima SAY barlow'\n\n\t\tpckl_name = args.input_stem+'_'+llh_name+'.pckl'\n\t\tassert os.path.isfile(pckl_name),'ERROR: file %s not found'%(pckl_name)\n\n\t\t#\n\t\t# Load the pickle file containing information about pseudo-experiments\n\t\t#\n\t\tindata = pickle.load(open(pckl_name))\n\n\t\tcontainer_TS_truth_high = []\n\t\tcontainer_TS_truth_low = []\n\t\tcontainer_TS_lowstat = []\n\t\tcontainer_TS_highstat = []\n\t\tbias = []\n\n\t\tval_truth = 20.\n\t\tcontainer_val_lowstat = []\n\t\tcontainer_val_highstat = []\n\n\t\tfor pseudo_exp in indata:\n\n\t\t\tval_low = pseudo_exp['lowstats_opt']['x']\n\t\t\tval_high =pseudo_exp['highstats_opt']['x']\n\t\t\tTS_low = -pseudo_exp['lowstats_opt']['fun']\n\t\t\tTS_high = -pseudo_exp['highstats_opt']['fun']\n\t\t\ttruth_low = pseudo_exp['lowstat_llh']\n\t\t\ttruth_high = pseudo_exp['truth_llh']\n\n\n\t\t\tif (np.isfinite(val_low)) and (np.isfinite(val_high)) and (np.isfinite(TS_low)) and (np.isfinite(TS_high)) and (np.isfinite(truth_low)) and (np.isfinite(truth_high)):\n\n\n\t\t\t\tif len(val_low.shape)>=1:\n\t\t\t\t\tval_low = val_low[0]\n\t\t\t\tif len(val_high.shape)>=1:\n\t\t\t\t\tval_high = val_high[0]\n\n\t\t\t\tcontainer_val_lowstat.append(float(val_low))\n\t\t\t\tcontainer_val_highstat.append(float(val_high))\n\n\t\t\t\tcontainer_TS_lowstat.append(2*np.abs(TS_low-truth_low))\n\t\t\t\tcontainer_TS_highstat.append(2*np.abs(TS_high-truth_high))\n\t\t\t\tcontainer_TS_truth_high.append(truth_high)\n\t\t\t\tcontainer_TS_truth_low.append(truth_low)\n\n\t\t\t\tbias.append( 2*((TS_low-truth_low)-(TS_high-truth_high)) )\n\t\t\telse:\n\t\t\t\tcontinue\n\n\n\t\t\n\n\t\tfig = 
Figure(nx=2,ny=3,figsize=(20,30))\n\t\tfig.get_ax(x=0,y=0).set_title(llh_name)\n\t\tfig.get_ax(x=0,y=0).hist(container_TS_highstat,bins=binning,histtype='step',linewidth=2.,color='r',label='TS distribution')\n\t\tfig.get_ax(x=0,y=0).hist(sample_chi2_distrib,bins=binning,histtype='step',linewidth=2.,color='k',label=r'$\\chi^{2}_{dof=1}$')\n\t\tfig.get_ax(x=0,y=0).set_xlabel(r'$-2(LLH_{opt}-LLH_{truth})$ (High statistics case)')\n\t\tfig.get_ax(x=0,y=0).legend()\n\n\n\t\tfig.get_ax(x=0,y=1).set_title(llh_name)\n\t\tfig.get_ax(x=0,y=1).hist(container_TS_lowstat,bins=binning,histtype='step',linewidth=2.,color='b',label='TS distribution')\n\t\tfig.get_ax(x=0,y=1).hist(sample_chi2_distrib,bins=binning,histtype='step',linewidth=2.,color='k',label=r'$\\chi^{2}_{dof=1}$')\n\t\tfig.get_ax(x=0,y=1).set_xlabel(r'$-2(LLH_{opt}-LLH_{truth})$ (Low statistics case)')\n\t\tfig.get_ax(x=0,y=1).legend()\n\n\t\tfig.get_ax(x=1,y=0).set_title(llh_name)\n\t\tfig.get_ax(x=1,y=0).hist(container_val_highstat,bins=20,histtype='step',linewidth=2.,color='r')\n\t\tfig.get_ax(x=1,y=0).axvline(x=20,linewidth=2,color='k',ls='--',label=r'Truth ($\\mu = 20$')\n\t\tfig.get_ax(x=1,y=0).set_xlabel('value (High statistics case)')\n\t\tfig.get_ax(x=1,y=0).legend()\n\n\n\t\tfig.get_ax(x=1,y=1).set_title(llh_name)\n\t\tfig.get_ax(x=1,y=1).hist(container_val_lowstat,bins=20,histtype='step',linewidth=2.,color='b')\n\t\tfig.get_ax(x=1,y=1).axvline(x=20,linewidth=2,color='k',ls='--',label=r'Truth ($\\mu = 20$')\n\t\tfig.get_ax(x=1,y=1).set_xlabel('Value (Low statistics case)')\n\t\tfig.get_ax(x=1,y=1).legend()\n\n\n\n\t\tfig.get_ax(x=0,y=2).set_title(llh_name)\n\t\tfig.get_ax(x=0,y=2).hist(bias,bins=20)\n\t\tfig.get_ax(x=0,y=2).set_xlabel('Bias')\n\t\toutpdf.savefig(fig.fig)\n\n\t\t#\n\t\t# Coverage test\n\t\t#\n\t\tcoverage_y = []\n\t\tcoverage_x = np.linspace(0.0,1.0,101)\n\n\t\tfor percent_coverage in coverage_x:\n\t\t\tchi2_TS_value = scp.chi2.ppf(percent_coverage,df=1)\n\t\t\tactual_coverage = sum(np.array(container_TS_lowstat)<=chi2_TS_value)/float(len(container_TS_lowstat))\n\t\t\tcoverage_y.append(actual_coverage)\n\n\n\n\t\tcoverage_fig.get_ax().plot(coverage_x,coverage_y,label=llh_name)\n\n\n\n\tcoverage_fig.get_ax().set_xlabel('Expected Wilks coverage')\n\tcoverage_fig.get_ax().set_ylabel('Actual Coverage (low statistics')\n\tcoverage_fig.get_ax().legend()\n\toutpdf.savefig(coverage_fig.fig)\n\n\n\toutpdf.close()\n", "sub_path": "poisson_llh_test_plots.py", "file_name": "poisson_llh_test_plots.py", "file_ext": "py", "file_size_in_byte": 5002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "future.standard_library.install_aliases", "line_number": 10, "usage_type": "call"}, {"api_name": "future.standard_library", "line_number": 10, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random.chisquare", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 34, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 48, "usage_type": "call"}, {"api_name": "builtins.open", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 81, "usage_type": "call"}, {"api_name": 
"numpy.abs", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.stats.chi2.ppf", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.stats.chi2", "line_number": 134, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "136191308", "text": "from flask import render_template\nfrom flask_login import login_required, current_user\nfrom flask import request, redirect, render_template, url_for, flash, current_app as app\nfrom flask_pymongo import PyMongo\nfrom .forms import ProjectForm, ProjectStepForm\nfrom ..account.forms import InviteForm\nfrom .models import Project, ProjectStep\nfrom ..account.models import User, Template, TemplateStep, Account\nfrom bson import ObjectId\nfrom ..utils import s3_upload, s3_retrieve, send_sms, send_email\nfrom ..helpers import flash_errors, confirm_token, send_invitation, distro, pretty_date\nfrom ..decorators import admin_login_required\nimport datetime\nimport json\n\nfrom . import project\n\n@project.route('/projects')\n@login_required\n@admin_login_required\ndef projects():\n\n if request.args.get('sort') == 'closing':\n sort = 'close_date'\n order = 1\n elif request.args.get('sort') == 'created':\n sort = 'create_date'\n order = -1\n elif request.args.get('sort') == 'inactive':\n sort = 'update_date'\n order = 1\n else:\n sort = 'update_date'\n order = -1\n\n if request.args.get('completed') == 'true':\n complete = True\n else:\n complete = False\n\n projects = Project.all(active=True, complete=complete, sort=sort, order=order)\n count = projects.count(True)\n return render_template('project/projects.html', projects=projects, count=count, title=\"Welcome\")\n\n@project.route('/projects/add', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef add_project():\n form = ProjectForm()\n if request.method == 'GET':\n templates = Template.all(current_user.get_account())\n form.template.choices = [(\"111111111111111111111111\", \"Use a template...\")] + [(template['_id'], template['name']) for template in templates]\n\n if request.method == 'POST' and form.validate_on_submit():\n if form.photo.data:\n s3_filepath = s3_upload(form.photo, 'photo')\n else:\n s3_filepath = None\n\n if form.close_date.data is None:\n date_time = ''\n elif form.close_time.data is None:\n date_time = datetime.datetime.combine(form.close_date.data, datetime.time.min)\n else:\n date_time = datetime.datetime.combine(form.close_date.data, form.close_time.data)\n\n project = Project(form.name.data, form.address1.data, \\\n form.address2.data, form.city.data, form.state.data, form.zip.data, \\\n date_time, photo=s3_filepath)\n project_id = project.add()\n\n # Add user's template steps to new project\n template_steps = list(TemplateStep.all(form.template.data))\n template_steps_count = template_steps.count(True)\n for template_step in template_steps:\n # takes the account steps and derives the new date based on the close date\n if 'days_before_close' in template_step['steps'] and form.close_date.data:\n days_before_close = template_step['steps']['days_before_close']\n\n if days_before_close:\n due_date = form.close_date.data - datetime.timedelta(days=days_before_close)\n due_date_time = datetime.datetime.combine(due_date, datetime.datetime.min.time())\n else:\n due_date_time = None\n else:\n due_date_time = None\n\n name = 
template_step['steps']['name'] if 'name' in template_step['steps'] else None\n notes = template_step['steps']['notes'] if 'notes' in template_step['steps'] else None\n\n project_step = ProjectStep(project_id, name=name, notes=notes, due_date=due_date_time, status='red')\n project_step.add()\n flash(\"Successfully created %s with %s steps\" % (form.name.data, template_steps_count), category='success')\n return redirect(url_for('project.project_steps', id=project_id))\n else:\n flash_errors(form)\n return render_template('project/project.html', id=[], form=form)\n\n@project.route('/projects/edit/', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef edit_project(id):\n form = ProjectForm()\n project = Project.get(id)\n\n if request.method == 'GET':\n form.name.data = project['name']\n form.address1.data = project['address1']\n form.address2.data = project['address2']\n form.city.data = project['city']\n form.state.data = project['state']\n form.zip.data = project['zip']\n form.close_date.data = project['close_date'] if project['close_date'] else None\n form.close_time.data = project['close_date'] if project['close_date'] and (project['close_date'].hour != 0 and project['close_date'] != 0) else None\n photo = project['photo'] if 'photo' in project else None\n\n completed = True if 'complete_date' in project else False\n\n return render_template('project/project.html', id=id, completed=completed, form=form, photo=photo)\n\n if request.method == 'POST' and form.validate_on_submit():\n if form.photo.data:\n s3_filepath = s3_upload(form.photo, 'photo')\n else:\n s3_filepath = None\n\n if form.close_date.data is None:\n date_time = ''\n elif form.close_time.data is None:\n date_time = datetime.datetime.combine(form.close_date.data, datetime.time.min)\n else:\n date_time = datetime.datetime.combine(form.close_date.data, form.close_time.data)\n\n Project.update(id, form.name.data, form.address1.data, \\\n form.address2.data, form.city.data, form.state.data, form.zip.data, \\\n date_time, photo=s3_filepath)\n\n # compare changes to provide details in text/email\n if project['close_date']:\n db_close_date = project['close_date'].replace(tzinfo=None)\n else:\n db_close_date = None\n\n if date_time != db_close_date and form.close_date.data:\n # build body of email/text based on what changed and email/text only if changes\n email_body = \"You're closing date has been updated to \" + pretty_date(date_time) + \"

\"\n text_body = \"You're closing date has been updated to \" + pretty_date(date_time) + \".\\n\\n\"\n\n email_body = email_body + \"
Login for more details: \" + url_for('account.login', _external=True)\n text_body = text_body + \"\\nLogin here: \" + url_for('account.login', _external=True)\n\n # then send email updates only if there are changes\n email_users = User.all(project=id, email_alert=True)\n email_distro = distro(email_users, 'email')\n if email_distro:\n send_email(email_distro, \"You're project has been updated\", email_body)\n\n # send text update\n text_users = User.all(project=id, text_alert=True)\n text_distro = distro(text_users, 'cell')\n if text_distro:\n send_sms(text_distro, text_body)\n # otherwise don't send an email or text if closing date didn't change\n\n flash(\"Updated project\", category='success')\n return redirect(url_for('project.project_steps', id=id))\n else:\n flash_errors(form)\n return render_template('project/project.html', id=id, form=form)\n\n@project.route('/photo/', methods=['GET'])\n@login_required\ndef get_photo(photo):\n return redirect(s3_retrieve(photo, 'photo'))\n\n@project.route('/projects/delete/', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef delete_project(id):\n Project.delete(id)\n flash(\"Project deleted\", category='success')\n return redirect(url_for('project.projects'))\n\n@project.route('/projects/complete/', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef complete_project(id):\n Project.complete(id)\n flash(\"Congrats! Your project has been closed\", category='success')\n return redirect(url_for('project.projects'))\n\n@project.route('/projects/reactivate/', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef reactivate_project(id):\n Project.reactivate(id)\n flash(\"Project has been reactivated\", category='success')\n return redirect(url_for('project.projects'))\n\n@project.route('/projects//steps')\n@login_required\n@admin_login_required\ndef project_steps(id):\n project_steps = list(ProjectStep.all(id, active=True, include_complete=True))\n if not project_steps:\n project_steps = []\n\n # have to convert to list so i can iterate over users and pass users to template\n users = list(User.all(project=id))\n clients_count = sum(1 for user in users if user['role']=='client')\n partners_count = sum(1 for user in users if user['role']=='partner')\n\n project = Project.get(id)\n realtor = User.get(accounts_realtor=current_user.get_account())\n\n if project['close_date']:\n days_left = (project['close_date'].date() - datetime.datetime.now().date()).days\n if days_left < 0:\n days_left = 0\n else:\n days_left = -1\n return render_template('project/projectsteps.html', id=id, project_steps=project_steps, users=users, project=project, realtor=realtor, days_left=days_left, partners_count=partners_count, clients_count=clients_count)\n\n@project.route('/projects//steps/add', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef add_project_step(id):\n form = ProjectStepForm()\n if request.method == 'POST' and form.validate_on_submit():\n if form.attachment.data:\n s3_filepath = s3_upload(form.attachment, 'attachment')\n else:\n s3_filepath = None\n\n if form.due_date.data is None:\n date_time = ''\n elif form.time.data is None:\n date_time = datetime.datetime.combine(form.due_date.data, datetime.time.min)\n else:\n date_time = datetime.datetime.combine(form.due_date.data, form.time.data)\n\n project_step = ProjectStep(project_id=id, name=form.name.data, \\\n notes=form.notes.data, attachment=s3_filepath, due_date=date_time, \\\n status = form.status.data)\n project_step.add()\n\n # build body 
of email/text\n email_body = \"A project step '\" + form.name.data + \"' has been added.<br><br>\"\n text_body = \"A project step '\" + form.name.data + \"' has been added.\\n\\n\"\n\n if date_time:\n email_body = email_body + \"Scheduled Date: \" + pretty_date(date_time) + \"<br>\"\n text_body = text_body + \"Scheduled Date: \" + pretty_date(date_time) + \"\\n\"\n if s3_filepath:\n email_body = email_body + \"Attachment: Added<br>\"\n text_body = text_body + \"Attachment: Added\\n\"\n\n email_body = email_body + \"<br>Login for more details: \" + url_for('account.login', _external=True)\n text_body = text_body + \"\\nLogin here: \" + url_for('account.login', _external=True)\n\n # then send email updates only if there are changes\n email_users = User.all(project=id, email_alert=True)\n email_distro = distro(email_users, 'email')\n if email_distro:\n send_email(email_distro, \"Your project has been updated\", email_body)\n\n # send text update\n text_users = User.all(project=id, text_alert=True)\n text_distro = distro(text_users, 'cell')\n if text_distro:\n send_sms(text_distro, text_body)\n\n flash(\"Successfully added project step\", category='success')\n return redirect(url_for('project.project_steps', id=id))\n else:\n flash_errors(form)\n return render_template('project/projectstep.html', id=id, project_step=[], form=form)\n\n@project.route('/projects/<id>/steps/edit/<step_id>', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef edit_project_step(id, step_id):\n form = ProjectStepForm()\n project_step = ProjectStep.get(id, step_id)\n\n if request.method == 'GET':\n form.name.data = project_step['steps'][0]['name']\n form.notes.data = project_step['steps'][0]['notes']\n form.due_date.data = project_step['steps'][0]['due_date'] if project_step['steps'][0]['due_date'] else None\n form.time.data = project_step['steps'][0]['due_date'] if project_step['steps'][0]['due_date'] and (project_step['steps'][0]['due_date'].hour != 0 and project_step['steps'][0]['due_date'] != 0) else None\n form.status.data = project_step['steps'][0]['status'] if 'status' in project_step['steps'][0] else 'Red'\n attachment = project_step['steps'][0]['attachment']\n completed = project_step['steps'][0]['complete_date'] if 'complete_date' in project_step['steps'][0] else False\n\n return render_template('project/projectstep.html', form=form, attachment=attachment, id=id, step_id=step_id, completed=completed)\n\n if request.method == 'POST' and form.validate_on_submit():\n if form.attachment.data:\n s3_filepath = s3_upload(form.attachment, 'attachment')\n else:\n s3_filepath = None\n\n if form.due_date.data is None:\n date_time = ''\n elif form.time.data is None:\n date_time = datetime.datetime.combine(form.due_date.data, datetime.time.min)\n else:\n date_time = datetime.datetime.combine(form.due_date.data, form.time.data)\n\n # update project step\n ProjectStep.update(id=id, step_id=step_id, name=form.name.data, \\\n notes=form.notes.data, attachment=s3_filepath, due_date=date_time, \\\n status=form.status.data)\n\n # compare changes to provide details in text/email\n name_changed = False if form.name.data == project_step['steps'][0]['name'] else True\n notes_changed = False if form.notes.data == project_step['steps'][0]['notes'] else True\n status_changed = False if form.status.data == project_step['steps'][0]['status'] else True\n attachment_changed = False if not s3_filepath else True\n\n # 5 scenarios for dates\n #1 same date to same date - don't send\n #2 date existed to new date - send\n #3 date existed to no date - don't send\n #4 no date to new date - send\n #5 no date to no date - don't send\n\n # check if an old date existed\n # setting to variable makes it cleaner to read\n if project_step['steps'][0]['due_date']:\n old_date = True\n else:\n old_date = False\n\n # check if a new date exists to tell us if we should text/email\n if date_time:\n new_date = True\n else:\n new_date = False\n\n due_date_changed = False\n\n # if there was a date and there is a new date\n if old_date and new_date:\n # compare dates\n #1 
if the same don't do anything\n if date_time == project_step['steps'][0]['due_date'].replace(tzinfo=None):\n due_date_changed = False\n #2 otherwise we need to send alert\n else:\n due_date_changed = True\n #3 otherwise if there was a date but the date was removed\n elif old_date and not new_date:\n due_date_changed = False\n #4\n elif not old_date and new_date:\n due_date_changed = True\n #5\n elif not old_date and not new_date:\n due_date_changed = False\n\n # build body of email/text based on what changed and email/text only if changes\n if due_date_changed or attachment_changed:\n if name_changed:\n email_body = \"Your project step \\'\" + project_step['steps'][0]['name'] + \\\n \"\\' has been updated to '\" + form.name.data + \"\\'.<br><br>\"\n text_body = \"A project step '\" + form.name.data + \"' has been updated.\\n\\n\"\n else:\n email_body = \"Your project step '\" + form.name.data + \"' has been updated.<br><br>\"\n text_body = \"A project step '\" + form.name.data + \"' has been updated.\\n\\n\"\n\n email_body = email_body + \" The following changes were made:<br>\"\n\n if notes_changed:\n email_body = email_body + \"Notes: \" + form.notes.data + \"<br>\"\n text_body = text_body + \"Notes: Updated\\n\"\n if due_date_changed:\n email_body = email_body + \"Scheduled Date: \" + pretty_date(date_time) + \"<br>\"\n text_body = text_body + \"Scheduled Date: \" + pretty_date(date_time) + \"\\n\"\n if status_changed:\n email_body = email_body + \"Status: \" + form.status.data.capitalize() + \"<br>\"\n text_body = text_body + \"Status: \" + form.status.data.capitalize() + \"\\n\"\n if attachment_changed:\n email_body = email_body + \"Attachment: Added<br>\"\n text_body = text_body + \"Attachment: Added\\n\"\n\n email_body = email_body + \"<br>Login for more details: \" + url_for('account.login', _external=True)\n text_body = text_body + \"\\nLogin here: \" + url_for('account.login', _external=True)\n\n # then send email updates only if there are changes\n email_users = User.all(project=id, email_alert=True)\n email_distro = distro(email_users, 'email')\n if email_distro:\n send_email(email_distro, \"Your project has been updated\", email_body)\n\n # send text update\n text_users = User.all(project=id, text_alert=True)\n text_distro = distro(text_users, 'cell')\n if text_distro:\n send_sms(text_distro, text_body)\n # otherwise don't send an email or text if nothing changed\n flash(\"Successfully updated project step\", category='success')\n return redirect(url_for('project.project_steps', id=id))\n else:\n flash_errors(form)\n return redirect(url_for('project.edit_project_step', id=id, project_step=project_step, step_id=step_id))\n\n\n@project.route('/attachment/<attachment>', methods=['GET'])\n@login_required\ndef get_attachment(attachment):\n return redirect(s3_retrieve(attachment, 'attachment'))\n\n@project.route('/projects/<id>/steps/delete/<step_id>', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef delete_project_step(id, step_id):\n ProjectStep.delete(id, step_id)\n return redirect(url_for('project.project_steps', id=id))\n\n@project.route('/projects/<id>/steps/complete/<step_id>', methods=['GET', 'POST'])\n@login_required\n@admin_login_required\ndef complete_project_step(id, step_id):\n ProjectStep.complete(id, step_id)\n return redirect(url_for('project.project_steps', id=id))\n\n@project.route('/projects/<id>/steps/sort', methods=['POST'])\n@login_required\ndef sort_project_step(id):\n ProjectStep.sort(id, request.form['order'])\n return json.dumps({'status':'Successfully sorted'})\n\n### adding a project viewer (client/partner) ###\n@project.route('/projects/<id>/<role>/invite', methods=['GET', 'POST'])\n@login_required\ndef invite_viewer(id, role):\n form = InviteForm()\n\n if request.method == 'GET':\n return render_template('project/viewer.html', id=id, user=[], user_role=role, form=form)\n\n if request.method == 'POST' and form.validate_on_submit():\n existing_user = User.get(email=form.email.data)\n realtor = User.get(accounts_realtor=current_user.get_account())\n\n try:\n if existing_user is None:\n send_invitation(form.email.data, realtor=realtor, new_user=True)\n\n if role == 'partner': # it's a partner\n text_alert = False\n email_alert = False\n else: # it's a client\n text_alert = False ## Update this line to True when we buy Twilio\n email_alert = True\n\n User.add(form.email.data, form.first_name.data, form.last_name.data, \\\n current_user.get_account(), role, invited_by=current_user.get_id(), \\\n confirmed=False, project=[id], email_alert=email_alert, text_alert=text_alert, \\\n partner_type=form.partner_type.data)\n else:\n send_invitation(form.email.data, realtor=realtor, new_user=False)\n User.update(existing_user['_id'], form.email.data, project=id)\n\n flash(\"Invitation sent\", category='success')\n return redirect(url_for('project.project_steps', id=id))\n except:\n flash(\"Error inviting viewer\", category='danger')\n return render_template('project/viewer.html', id=id, form=form)\n else:\n flash_errors(form)\n return render_template('project/viewer.html', id=id, user=[], form=form)\n\n@project.route('/projects/<id>/viewers/edit/<viewer_id>', methods=['GET', 'POST'])\n@login_required\ndef edit_viewer(id, viewer_id):\n form = InviteForm()\n user = User.get(id=viewer_id)\n user_role = user['role']\n\n if request.method == 'GET':\n 
form.first_name.data = user['first_name']\n form.last_name.data = user['last_name']\n form.email.data = user['email']\n form.cell.data = user['cell']\n form.partner_type.data = user['partner_type'] if 'partner_type' in user else None\n\n return render_template('project/viewer.html', id=id, user=user, user_role=user_role, form=form)\n\n if request.method == 'POST' and form.validate_on_submit():\n try:\n realtor = User.get(accounts_realtor=current_user.get_account())\n print(form.partner_type.data)\n User.update(viewer_id, form.email.data, form.first_name.data, form.last_name.data, form.cell.data, partner_type=form.partner_type.data)\n send_invitation(form.email.data, realtor=realtor, new_user=True)\n flash(\"Invitation resent\", category='success')\n except:\n flash(\"Error inviting viewer\", category='danger')\n return render_template('project/viewer.html', form=form, user=user, user_role=user_role)\n\n return redirect(url_for('project.project_steps', id=id))\n else:\n flash_errors(form)\n return render_template('project/viewer.html', id=id, user=[], user_role=user_role, form=form)\n\n@project.route('/projects/<id>/viewers/delete/<viewer_id>', methods=['GET', 'POST'])\n@login_required\ndef delete_viewer(id, viewer_id):\n User.delete(id=viewer_id, context='viewer', project=id)\n flash(\"User removed successfully\", category='success')\n return redirect(url_for('project.project_steps', id=id))\n", "sub_path": "app/project/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 22579, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.request.args.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Project.all", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 43, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 19, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 20, "usage_type": "name"}, {"api_name": "forms.ProjectForm", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "account.models.Template.all", "line_number": 51, "usage_type": "call"}, {"api_name": "account.models.Template", "line_number": 51, "usage_type": "name"}, {"api_name": "flask_login.current_user.get_account", "line_number": 51, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 51, "usage_type": "name"},
{"api_name": "flask.request.method", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "utils.s3_upload", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 63, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 67, "usage_type": "call"}, {"api_name": "account.models.TemplateStep.all", "line_number": 73, "usage_type": "call"}, {"api_name": "account.models.TemplateStep", "line_number": 73, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "attribute"}, {"api_name": "datetime.datetime.min.time", "line_number": 82, "usage_type": "call"}, {"api_name": "models.ProjectStep", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 94, "usage_type": "call"}, {"api_name": "helpers.flash_errors", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 97, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 46, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 47, "usage_type": "name"}, {"api_name": "forms.ProjectForm", "line_number": 103, "usage_type": "call"}, {"api_name": "models.Project.get", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 121, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 121, "usage_type": "name"}, {"api_name": "utils.s3_upload", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 130, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 130, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.Project.update", "line_number": 134, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 134, "usage_type": "name"}, {"api_name": "helpers.pretty_date", "line_number": 146, "usage_type": "call"}, {"api_name": "helpers.pretty_date", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 150, "usage_type": "call"}, {"api_name": "account.models.User.all", "line_number": 153, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 153, 
"usage_type": "name"}, {"api_name": "helpers.distro", "line_number": 154, "usage_type": "call"}, {"api_name": "utils.send_email", "line_number": 156, "usage_type": "call"}, {"api_name": "account.models.User.all", "line_number": 159, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 159, "usage_type": "name"}, {"api_name": "helpers.distro", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.send_sms", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 166, "usage_type": "call"}, {"api_name": "helpers.flash_errors", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 169, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 100, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.s3_retrieve", "line_number": 174, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 172, "usage_type": "name"}, {"api_name": "models.Project.delete", "line_number": 180, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 182, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 182, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 177, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 178, "usage_type": "name"}, {"api_name": "models.Project.complete", "line_number": 188, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 188, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 190, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 185, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 186, "usage_type": "name"}, {"api_name": "models.Project.reactivate", "line_number": 196, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 197, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 198, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 193, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 194, "usage_type": "name"}, {"api_name": "models.ProjectStep.all", "line_number": 204, "usage_type": "call"}, {"api_name": "models.ProjectStep", "line_number": 204, "usage_type": "name"}, {"api_name": "account.models.User.all", "line_number": 209, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 209, "usage_type": "name"}, {"api_name": "models.Project.get", "line_number": 213, "usage_type": "call"}, {"api_name": "models.Project", "line_number": 213, "usage_type": "name"}, {"api_name": "account.models.User.get", "line_number": 214, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 214, 
"usage_type": "name"}, {"api_name": "flask_login.current_user.get_account", "line_number": 214, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 214, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 217, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 217, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 222, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 201, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 202, "usage_type": "name"}, {"api_name": "forms.ProjectStepForm", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 229, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 229, "usage_type": "name"}, {"api_name": "utils.s3_upload", "line_number": 231, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 238, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 238, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 240, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 240, "usage_type": "attribute"}, {"api_name": "models.ProjectStep", "line_number": 242, "usage_type": "call"}, {"api_name": "helpers.pretty_date", "line_number": 252, "usage_type": "call"}, {"api_name": "helpers.pretty_date", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 259, "usage_type": "call"}, {"api_name": "account.models.User.all", "line_number": 262, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 262, "usage_type": "name"}, {"api_name": "helpers.distro", "line_number": 263, "usage_type": "call"}, {"api_name": "utils.send_email", "line_number": 265, "usage_type": "call"}, {"api_name": "account.models.User.all", "line_number": 268, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 268, "usage_type": "name"}, {"api_name": "helpers.distro", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.send_sms", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 273, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 274, "usage_type": "call"}, {"api_name": "helpers.flash_errors", "line_number": 276, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 277, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 225, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 226, "usage_type": "name"}, {"api_name": "forms.ProjectStepForm", "line_number": 283, "usage_type": "call"}, {"api_name": "models.ProjectStep.get", "line_number": 284, "usage_type": "call"}, {"api_name": "models.ProjectStep", "line_number": 284, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 286, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 286, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 297, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 297, "usage_type": "name"}, 
{"api_name": "utils.s3_upload", "line_number": 299, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 306, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 306, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 306, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 308, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 308, "usage_type": "attribute"}, {"api_name": "models.ProjectStep.update", "line_number": 311, "usage_type": "call"}, {"api_name": "models.ProjectStep", "line_number": 311, "usage_type": "name"}, {"api_name": "helpers.pretty_date", "line_number": 378, "usage_type": "call"}, {"api_name": "helpers.pretty_date", "line_number": 379, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 387, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 388, "usage_type": "call"}, {"api_name": "account.models.User.all", "line_number": 391, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 391, "usage_type": "name"}, {"api_name": "helpers.distro", "line_number": 392, "usage_type": "call"}, {"api_name": "utils.send_email", "line_number": 394, "usage_type": "call"}, {"api_name": "account.models.User.all", "line_number": 397, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 397, "usage_type": "name"}, {"api_name": "helpers.distro", "line_number": 398, "usage_type": "call"}, {"api_name": "utils.send_sms", "line_number": 400, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 402, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 403, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 403, "usage_type": "call"}, {"api_name": "helpers.flash_errors", "line_number": 405, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 406, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 406, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 280, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 281, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 412, "usage_type": "call"}, {"api_name": "utils.s3_retrieve", "line_number": 412, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 410, "usage_type": "name"}, {"api_name": "models.ProjectStep.delete", "line_number": 418, "usage_type": "call"}, {"api_name": "models.ProjectStep", "line_number": 418, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 419, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 419, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 415, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 416, "usage_type": "name"}, {"api_name": "models.ProjectStep.complete", "line_number": 425, "usage_type": "call"}, {"api_name": "models.ProjectStep", "line_number": 425, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 426, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 426, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 422, "usage_type": "name"}, {"api_name": "decorators.admin_login_required", "line_number": 423, "usage_type": "name"}, {"api_name": "models.ProjectStep.sort", "line_number": 431, "usage_type": "call"}, {"api_name": "models.ProjectStep", "line_number": 431, 
"usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 431, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 431, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 432, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 429, "usage_type": "name"}, {"api_name": "account.forms.InviteForm", "line_number": 438, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 440, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 440, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 441, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 443, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 443, "usage_type": "name"}, {"api_name": "account.models.User.get", "line_number": 444, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 444, "usage_type": "name"}, {"api_name": "account.models.User.get", "line_number": 445, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 445, "usage_type": "name"}, {"api_name": "flask_login.current_user.get_account", "line_number": 445, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 445, "usage_type": "name"}, {"api_name": "helpers.send_invitation", "line_number": 449, "usage_type": "call"}, {"api_name": "account.models.User.add", "line_number": 458, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 458, "usage_type": "name"}, {"api_name": "flask_login.current_user.get_account", "line_number": 459, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 459, "usage_type": "name"}, {"api_name": "flask_login.current_user.get_id", "line_number": 459, "usage_type": "call"}, {"api_name": "helpers.send_invitation", "line_number": 463, "usage_type": "call"}, {"api_name": "account.models.User.update", "line_number": 464, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 464, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 466, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 467, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 467, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 469, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 470, "usage_type": "call"}, {"api_name": "helpers.flash_errors", "line_number": 472, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 473, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 436, "usage_type": "name"}, {"api_name": "account.forms.InviteForm", "line_number": 478, "usage_type": "call"}, {"api_name": "account.models.User.get", "line_number": 479, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 479, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 482, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 482, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 489, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 491, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 491, "usage_type": "name"}, {"api_name": "account.models.User.get", "line_number": 493, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 493, "usage_type": "name"}, {"api_name": 
"flask_login.current_user.get_account", "line_number": 493, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 493, "usage_type": "name"}, {"api_name": "account.models.User.update", "line_number": 495, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 495, "usage_type": "name"}, {"api_name": "helpers.send_invitation", "line_number": 496, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 497, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 499, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 500, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 502, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 502, "usage_type": "call"}, {"api_name": "helpers.flash_errors", "line_number": 504, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 505, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 476, "usage_type": "name"}, {"api_name": "account.models.User.delete", "line_number": 510, "usage_type": "call"}, {"api_name": "account.models.User", "line_number": 510, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 511, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 512, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 512, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 508, "usage_type": "name"}]} +{"seq_id": "202719613", "text": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport webob\n\nfrom nova import compute\nfrom nova import log as logging\nfrom nova.api.openstack import wsgi\n\nfrom reddwarf import exception\nfrom reddwarf.api import common\nfrom reddwarf.api import deserializer\nfrom reddwarf.db import api as dbapi\nfrom reddwarf.guest import api as guest_api\nfrom reddwarf.guest.db import models\n\n\nLOG = logging.getLogger('reddwarf.api.users')\nLOG.setLevel(logging.DEBUG)\n\n\nclass Controller(object):\n    \"\"\" The User Controller for the Platform API \"\"\"\n\n    def __init__(self):\n        self.guest_api = guest_api.API()\n        self.compute_api = compute.API()\n        super(Controller, self).__init__()\n\n    def show(self, req, instance_id, id):\n        raise exception.NotImplemented()\n    \n    def index(self, req, instance_id):\n        \"\"\" Returns a list of database users for the db instance \"\"\"\n        LOG.info(\"Call to Users index - %s\", instance_id)\n        LOG.debug(\"%s - %s\", req.environ, req.body)\n        local_id = dbapi.localid_from_uuid(instance_id)\n        ctxt = req.environ['nova.context']\n        common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n        try:\n            result = self.guest_api.list_users(ctxt, local_id)\n        except Exception as err:\n            LOG.error(err)\n            raise exception.InstanceFault(\"Unable to get the list of users\")\n        LOG.debug(\"LIST USERS RESULT - %s\", str(result))\n        users = {'users':[]}\n        for user in result:\n            mysql_user = models.MySQLUser()\n            mysql_user.deserialize(user)\n            dbs = []\n            for db in mysql_user.databases:\n                dbs.append({'name': db['_name']})\n            users['users'].append({'name': mysql_user.name, 'databases': dbs})\n        LOG.debug(\"LIST USERS RETURN - %s\", users)\n        return users\n\n    def delete(self, req, instance_id, id):\n        \"\"\" Deletes a user in the db instance \"\"\"\n        LOG.info(\"Call to Delete User - %s for instance %s\",\n                 id, instance_id)\n        LOG.debug(\"%s - %s\", req.environ, req.body)\n        local_id = dbapi.localid_from_uuid(instance_id)\n        ctxt = req.environ['nova.context']\n        common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n        try:\n            user = models.MySQLUser()\n            user.name = id\n        except ValueError as ve:\n            LOG.error(ve)\n            raise exception.BadRequest(ve.message)\n\n        self.guest_api.delete_user(ctxt, local_id, user.serialize())\n        return webob.Response(status_int=202)\n\n    def create(self, req, instance_id, body):\n        \"\"\" Creates a new user for the db instance \"\"\"\n        self._validate(body)\n\n        LOG.info(\"Call to Create Users for instance %s\", instance_id)\n        LOG.debug(\"%s - %s\", req.environ, body)\n        local_id = dbapi.localid_from_uuid(instance_id)\n        ctxt = req.environ['nova.context']\n        common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n\n        users = common.populate_users(body.get('users', ''))\n        self.guest_api.create_user(ctxt, local_id, users)\n        return webob.Response(status_int=202)\n\n    def _validate(self, body):\n        \"\"\"Validate that the request has all the required parameters\"\"\"\n        if not body:\n            raise exception.BadRequest(\"The request contains an empty body\")\n\n        if not body.get('users', ''):\n            raise exception.BadRequest(\"Required element/key 'users' was not \"\n                                       \"specified\")\n        for user in body.get('users'):\n            if not user.get('name'):\n                raise exception.BadRequest(\"Required attribute/key 'name' was \"\n                                           \"not specified\")\n            if not user.get('password'):\n                raise exception.BadRequest(\"Required attribute/key 'password' \"\n                                           \"was not specified\")\n\n\ndef create_resource(version='1.0'):\n    controller = {\n        
'1.0': Controller,\n }[version]()\n\n metadata = {\n \"attributes\": {\n 'user': ['name', 'password']\n },\n }\n\n xmlns = {\n '1.0': common.XML_NS_V10,\n }[version]\n\n serializers = {\n 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,\n xmlns=xmlns),\n }\n\n deserializers = {\n 'application/xml': deserializer.UserXMLDeserializer(),\n }\n\n response_serializer = wsgi.ResponseSerializer(body_serializers=serializers)\n request_deserializer = wsgi.RequestDeserializer(body_deserializers=deserializers)\n\n return wsgi.Resource(controller, deserializer=request_deserializer,\n serializer=response_serializer)\n", "sub_path": "reddwarf/api/users.py", "file_name": "users.py", "file_ext": "py", "file_size_in_byte": 5380, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "nova.log.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "nova.log", "line_number": 32, "usage_type": "name"}, {"api_name": "nova.log.DEBUG", "line_number": 33, "usage_type": "attribute"}, {"api_name": "nova.log", "line_number": 33, "usage_type": "name"}, {"api_name": "reddwarf.guest.api.API", "line_number": 40, "usage_type": "call"}, {"api_name": "reddwarf.guest.api", "line_number": 40, "usage_type": "name"}, {"api_name": "nova.compute.API", "line_number": 41, "usage_type": "call"}, {"api_name": "nova.compute", "line_number": 41, "usage_type": "name"}, {"api_name": "reddwarf.exception.NotImplemented", "line_number": 45, "usage_type": "call"}, {"api_name": "reddwarf.exception", "line_number": 45, "usage_type": "name"}, {"api_name": "reddwarf.db.api.localid_from_uuid", "line_number": 51, "usage_type": "call"}, {"api_name": "reddwarf.db.api", "line_number": 51, "usage_type": "name"}, {"api_name": "reddwarf.api.common.instance_available", "line_number": 53, "usage_type": "call"}, {"api_name": "reddwarf.api.common", "line_number": 53, "usage_type": "name"}, {"api_name": "reddwarf.exception.InstanceFault", "line_number": 58, "usage_type": "call"}, {"api_name": "reddwarf.exception", "line_number": 58, "usage_type": "name"}, {"api_name": "reddwarf.guest.db.models.MySQLUser", "line_number": 62, "usage_type": "call"}, {"api_name": "reddwarf.guest.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "reddwarf.db.api.localid_from_uuid", "line_number": 76, "usage_type": "call"}, {"api_name": "reddwarf.db.api", "line_number": 76, "usage_type": "name"}, {"api_name": "reddwarf.api.common.instance_available", "line_number": 78, "usage_type": "call"}, {"api_name": "reddwarf.api.common", "line_number": 78, "usage_type": "name"}, {"api_name": "reddwarf.guest.db.models.MySQLUser", "line_number": 80, "usage_type": "call"}, {"api_name": "reddwarf.guest.db.models", "line_number": 80, "usage_type": "name"}, {"api_name": "reddwarf.exception.BadRequest", "line_number": 84, "usage_type": "call"}, {"api_name": "reddwarf.exception", "line_number": 84, "usage_type": "name"}, {"api_name": "webob.Response", "line_number": 87, "usage_type": "call"}, {"api_name": "reddwarf.db.api.localid_from_uuid", "line_number": 95, "usage_type": "call"}, {"api_name": "reddwarf.db.api", "line_number": 95, "usage_type": "name"}, {"api_name": "reddwarf.api.common.instance_available", "line_number": 97, "usage_type": "call"}, {"api_name": "reddwarf.api.common", "line_number": 97, "usage_type": "name"}, {"api_name": "reddwarf.api.common.populate_users", "line_number": 99, "usage_type": "call"}, {"api_name": "reddwarf.api.common", "line_number": 99, "usage_type": 
"name"}, {"api_name": "webob.Response", "line_number": 101, "usage_type": "call"}, {"api_name": "reddwarf.exception.BadRequest", "line_number": 106, "usage_type": "call"}, {"api_name": "reddwarf.exception", "line_number": 106, "usage_type": "name"}, {"api_name": "reddwarf.exception.BadRequest", "line_number": 109, "usage_type": "call"}, {"api_name": "reddwarf.exception", "line_number": 109, "usage_type": "name"}, {"api_name": "reddwarf.exception.BadRequest", "line_number": 113, "usage_type": "call"}, {"api_name": "reddwarf.exception", "line_number": 113, "usage_type": "name"}, {"api_name": "reddwarf.exception.BadRequest", "line_number": 116, "usage_type": "call"}, {"api_name": "reddwarf.exception", "line_number": 116, "usage_type": "name"}, {"api_name": "reddwarf.api.common.XML_NS_V10", "line_number": 132, "usage_type": "attribute"}, {"api_name": "reddwarf.api.common", "line_number": 132, "usage_type": "name"}, {"api_name": "nova.api.openstack.wsgi.XMLDictSerializer", "line_number": 136, "usage_type": "call"}, {"api_name": "nova.api.openstack.wsgi", "line_number": 136, "usage_type": "name"}, {"api_name": "reddwarf.api.deserializer.UserXMLDeserializer", "line_number": 141, "usage_type": "call"}, {"api_name": "reddwarf.api.deserializer", "line_number": 141, "usage_type": "name"}, {"api_name": "nova.api.openstack.wsgi.ResponseSerializer", "line_number": 144, "usage_type": "call"}, {"api_name": "nova.api.openstack.wsgi", "line_number": 144, "usage_type": "name"}, {"api_name": "nova.api.openstack.wsgi.RequestDeserializer", "line_number": 145, "usage_type": "call"}, {"api_name": "nova.api.openstack.wsgi", "line_number": 145, "usage_type": "name"}, {"api_name": "nova.api.openstack.wsgi.Resource", "line_number": 147, "usage_type": "call"}, {"api_name": "nova.api.openstack.wsgi", "line_number": 147, "usage_type": "name"}]} +{"seq_id": "275657294", "text": "from typing import Dict, Optional\n\nimport timm\nimport torch.nn as nn\n\nfrom src import layer\n\n\nclass CustomModel(nn.Module):\n def __init__(\n self,\n n_classes: int,\n model_name: str = \"resnet50\",\n pooling_name: str = \"GeM\",\n args_pooling: Optional[Dict] = None,\n ):\n super(CustomModel, self).__init__()\n\n self.backbone = timm.create_model(model_name, pretrained=True)\n\n final_in_features = list(self.backbone.children())[-1].in_features\n self.backbone = nn.Sequential(*list(self.backbone.children())[:-2])\n\n self.pooling = getattr(layer, pooling_name)(**args_pooling)\n\n self.act = nn.ReLU()\n self.drop = nn.Dropout(p=0.5)\n self.fc = nn.Linear(final_in_features, n_classes)\n\n def forward(self, x):\n x = self.backbone(x)\n x = self.pooling(x)\n x = x.view(len(x), -1)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc(x)\n return x\n", "sub_path": "experiments/exp_026/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 15, "usage_type": "name"}, {"api_name": "timm.create_model", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "src.layer", "line_number": 24, "usage_type": "argument"}, 
{"api_name": "torch.nn.ReLU", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "407865577", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef create_df(key):\n return df_responses.iloc[df_orgs_private_grouped.groups[key]]\n\n\ndef get_methods(obj):\n # Getting all methods from the groupby object:\n meth=[method_name for method_name in dir(obj) if callable(getattr(obj, method_name)) & ~method_name.startswith('_')]\n\n # Printing the result\n print(IPython.utils.text.columnize(meth))\n\n\ndef get_model_group_indices(group):\n return df_model.groupby(['dim', 'lvl']).groups[group[0], int(group[1])]\n\n\ndef get_levels(df_responses_group, dim):\n df_dim=pd.DataFrame()\n for lvl in dict_mod[dim]:\n df_dim[lvl]=df_responses_group[dict_mod[dim][lvl]].mean(axis=1)\n\n df_dim[df_dim.apply(lambda x: x >= 5)]=0\n df_dim['max']=df_dim.apply(lambda row: next((i for i, x in enumerate(row) if x), None), axis=1)\n\n df_dim[df_dim.isna()]=0\n df_dim['partial']=df_dim.apply(lambda row: 0.25 * (-1+row[int(row[['max']])+1]), axis=1)\n df_dim['level']=df_dim.apply(lambda row: row['max']+row['partial']+1, axis=1)\n\n # df_dim = df_dim['level']\n df_dim['level'][df_dim['level']==0.75]=5.0\n df_dim=df_dim[['level']]\n return df_dim\n\n\ndef analyze(df_responses):\n df=df_responses\n for dim in dimensions:\n df[dim]=get_levels(df_responses, dim)\n df=df[dimensions]\n return df\n\n\ndef rank_plot(df):\n fig, ax=plt.subplots(len(df.keys()), figsize=(10, 50))\n fig.subplots_adjust(hspace=0.5)\n # fig.\n\n for i, key in enumerate(list(df.keys())):\n ax[i].bar(df[key]['name'].values, df[key]['overall'].values)\n ax[i].set_title('{}'.format(str(key)))\n ax[i].set_xlabel('x_label') # ax[i].set_xticks(rotation=70)\n\n", "sub_path": "final_project/tasks/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 1719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "612097395", "text": "import collections\nimport csv\n\nfrom dancedeets import app\nfrom dancedeets import base_servlet\nfrom dancedeets import fb_api\nfrom dancedeets.events import event_emails\nfrom . 
import users\n\n\n@app.route('/user/unsubscribe')\nclass UserUnsubscribeHandler(base_servlet.BaseRequestHandler):\n def requires_login(self):\n return False\n\n def get(self):\n self.finish_preload()\n #email = self.request.get('email')\n #email_hash = self.request.get('email_hash')\n #if compute_email_hash(email) == email_hash:\n # self.display['email'] = email\n #else:\n # print error message\n self.display['email'] = self.request.get('email')\n self.render_template('user_unsubscribe')\n\n def post(self):\n self.finish_preload()\n self.errors_are_fatal()\n email = self.request.get('email')\n if not email:\n self.redirect('/')\n\n email = email.lower()\n user = users.User.query(users.User.email == email).get()\n weekly = True\n promoter = True\n if weekly:\n if user:\n user.send_email = False\n user.put()\n if promoter:\n event_emails.unsubscribe_email(email)\n if self.user:\n self.user.add_message(\"Successfully unsubscribed %s!\" % email)\n self.redirect('/user/edit')\n else:\n self.redirect('/')\n\n\n@app.route('/user/edit')\nclass UserHandler(base_servlet.BaseRequestHandler):\n def get(self):\n self.finish_preload()\n\n defaults = {}\n user = users.User.get_by_id(self.fb_uid)\n for k in dir(user):\n defaults[k] = getattr(user, k)\n for field in defaults.keys():\n if self.request.get(field):\n defaults[field] = self.request.get(field)\n self.display['defaults'] = defaults\n\n #location_too_far = False\n #location_unknown = False\n\n #TODO(lambert): implement distance-from-saved-location and current-location better, via ajax and geo-api call\n\n self.render_template('user')\n\n def post(self):\n self.finish_preload()\n self.update_user()\n # Disabled due to an error, the user.compute_derived_properties does some GeoCode lookups which are not ancestor queries.\n #db.run_in_transaction(self.update_user)\n self.user.add_message(\"Settings saved!\")\n self.redirect('/')\n\n def update_user(self):\n user = users.User.get_by_id(self.fb_uid)\n for field in ['location', 'distance_units']:\n form_value = self.request.get(field)\n setattr(user, field, form_value)\n user.distance = min(self.request.get('distance'), '500')\n user.min_attendees = int(self.request.get('min_attendees'))\n if user.location:\n user.compute_derived_properties(self.fbl.fetched_data(fb_api.LookupUser, self.fb_uid))\n if not user.location_country:\n self.add_error(\"No country for location %r\" % user.location)\n else:\n self.add_error(\"No location\")\n user.email = self.request.get('email')\n #TODO(lambert): add an option for doing email \"via facebook\" as well. 
not everyone uses email.\n for field in ['send_email']:\n form_value = self.request.get(field) == \"true\"\n setattr(user, field, form_value)\n self.errors_are_fatal()\n user.put()\n\n\n@app.route('/tools/show_users')\nclass ShowUsersHandler(base_servlet.BaseRequestHandler):\n def get(self):\n self.finish_preload()\n num_fetch_users = int(self.request.get('num_users', 500))\n order_field = self.request.get('order_field', 'creation_time')\n all_users = users.User.query().order(-getattr(users.User, order_field)).fetch(num_fetch_users)\n client_counts = collections.defaultdict(lambda: 0)\n for user in all_users:\n for client in user.clients:\n client_counts[client] += 1\n user_ids = [x.fb_uid for x in all_users]\n fb_users = self.fbl.get_multi(fb_api.LookupUser, user_ids, allow_fail=True)\n\n self.display['client_counts'] = client_counts\n self.display['num_users'] = len(all_users)\n self.display['num_active_users'] = len([x for x in all_users if not x.expired_oauth_token])\n self.display['users'] = all_users\n self.display['fb_users'] = fb_users\n self.display['track_analytics'] = False\n self.render_template('show_users')\n\n\n@app.route('/tools/user_emails')\nclass UserEmailExportHandler(base_servlet.BaseRequestHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'text/plain'\n num_fetch_users = int(self.request.get('num_users', 500))\n order_field = self.request.get('order_field', 'creation_time')\n all_users = users.User.query().order(-getattr(users.User, order_field)).fetch(num_fetch_users)\n writer = csv.writer(self.response.out)\n writer.writerow(['Email', 'Full Name', 'First Name', 'Last Name', 'Expired Token', 'Weekly Subscription', 'Locale', 'Country'])\n for user in all_users:\n if user.email:\n trimmed_locale = user.locale or ''\n if '_' in trimmed_locale:\n trimmed_locale = trimmed_locale.split('_')[0]\n writer.writerow([\n user.email.encode('utf8'),\n (user.full_name or '').encode('utf8'),\n (user.first_name or '').encode('utf8'),\n (user.last_name or '').encode('utf8'),\n unicode(user.expired_oauth_token),\n unicode(user.send_email),\n trimmed_locale,\n user.location_country or '',\n ])\n", "sub_path": "server/dancedeets/users/user_servlets.py", "file_name": "user_servlets.py", "file_ext": "py", "file_size_in_byte": 5772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "dancedeets.base_servlet.BaseRequestHandler", "line_number": 12, "usage_type": "attribute"}, {"api_name": "dancedeets.base_servlet", "line_number": 12, "usage_type": "name"}, {"api_name": "dancedeets.events.event_emails.unsubscribe_email", "line_number": 43, "usage_type": "call"}, {"api_name": "dancedeets.events.event_emails", "line_number": 43, "usage_type": "name"}, {"api_name": "dancedeets.app.route", "line_number": 11, "usage_type": "call"}, {"api_name": "dancedeets.app", "line_number": 11, "usage_type": "name"}, {"api_name": "dancedeets.base_servlet.BaseRequestHandler", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dancedeets.base_servlet", "line_number": 52, "usage_type": "name"}, {"api_name": "dancedeets.fb_api.LookupUser", "line_number": 88, "usage_type": "attribute"}, {"api_name": "dancedeets.fb_api", "line_number": 88, "usage_type": "name"}, {"api_name": "dancedeets.app.route", "line_number": 51, "usage_type": "call"}, {"api_name": "dancedeets.app", "line_number": 51, "usage_type": "name"}, {"api_name": "dancedeets.base_servlet.BaseRequestHandler", "line_number": 103, "usage_type": "attribute"}, 
{"api_name": "dancedeets.base_servlet", "line_number": 103, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 109, "usage_type": "call"}, {"api_name": "dancedeets.fb_api.LookupUser", "line_number": 114, "usage_type": "attribute"}, {"api_name": "dancedeets.fb_api", "line_number": 114, "usage_type": "name"}, {"api_name": "dancedeets.app.route", "line_number": 102, "usage_type": "call"}, {"api_name": "dancedeets.app", "line_number": 102, "usage_type": "name"}, {"api_name": "dancedeets.base_servlet.BaseRequestHandler", "line_number": 126, "usage_type": "attribute"}, {"api_name": "dancedeets.base_servlet", "line_number": 126, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 132, "usage_type": "call"}, {"api_name": "dancedeets.app.route", "line_number": 125, "usage_type": "call"}, {"api_name": "dancedeets.app", "line_number": 125, "usage_type": "name"}]} +{"seq_id": "26791575", "text": "import xlrd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.ticker as mticker\n\n################################################################\n\nf = mticker.ScalarFormatter(useOffset=False, useMathText=True)\ng = lambda x, pos : \"${}$\".format(f._formatSciNotation('%.3e' % x))\nfmt = mticker.FuncFormatter(g)\n################################################################\n\n\ndocument = xlrd.open_workbook(\"Distribution spatiale de l'intensité du jet atomique non-défléchi.xlsx\")\nfeuille_1 = document.sheet_by_index(0)\n\ns_m = np.array([])\ns_p = np.array([])\nerr_s = np.array([])\nI_EA = np.array([])\n\nfor r in range(12, 18):\n s_m = np.append(s_m, feuille_1.cell_value(rowx=r, colx=6))\n s_p = np.append(s_p, np.abs(feuille_1.cell_value(rowx=r, colx=5)))\n err_s = np.append(err_s, feuille_1.cell_value(rowx=r, colx=13))\n I_EA = np.append(I_EA, feuille_1.cell_value(rowx=r, colx=2))\n\nerrI_EA = np.array([0.1] * len(I_EA))\n\ns_moy = (s_m + s_p)/2\nerr_s_moy = 2*err_s\n\ndef lin(x, a, b):\n return a*x + b\n\npopt, pcov = curve_fit(lin, I_EA, s_moy)\nx = np.linspace(min(I_EA), max(I_EA), 1000)\n\nresiduals = s_moy - lin(I_EA, *popt)\nss_res = np.sum(residuals ** 2)\nss_tot = np.sum((s_moy-np.mean(s_moy))**2)\nr_squared = 1 - (ss_res / ss_tot)\n\nplt.errorbar(I_EA, s_m, xerr=errI_EA, yerr=err_s, marker='v', ls='None', label='$s_-$', color='#20B2AA')\nplt.errorbar(I_EA, s_p, xerr=errI_EA, yerr=err_s, marker='^', ls='None', label='$s_+$', color='#FFD700')\nplt.plot(x, lin(x, *popt), linestyle='-', color='black', label=\"Fit linéaire de la moyenne de $s_+$ et $s_-$ :\"\n f\"\\n$s_{{moy}} = ${fmt(popt[0])}$I + ${fmt(popt[1])} $(R^2 = {r_squared:.3f})$\")\nplt.legend()\nplt.xlabel(\"Intensité de l'électro-aimant $[A]$\")\nplt.ylabel(\"Déflexion induite par l'électro-aimant $[mm]$\")\nplt.savefig('figures/s_vs_I(pico).png', dpi=200)\nplt.show()\n", "sub_path": "s_vs_I(pico).py", "file_name": "s_vs_I(pico).py", "file_ext": "py", "file_size_in_byte": 1958, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "matplotlib.ticker.ScalarFormatter", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 11, "usage_type": "name"}, {"api_name": "xlrd.open_workbook", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "275670329", "text": "from bs4 import BeautifulSoup\r\nfrom urllib import parse\r\nimport urllib.request as req\r\nfrom urllib.parse import urlencode\r\nimport datetime \r\nimport csv\r\nimport re\r\nfrom twitercollect import findingNumofCommentsInTwits\r\n\r\n#주어진 url로 beautifulsoup 객체를 만들어준다\r\ndef setbsObj(url):\r\n res = req.urlopen(url).read()\r\n bsObj = BeautifulSoup(res, \"html.parser\")\r\n \r\n return bsObj;\r\n\r\n#네이버 영화에서 별점 정보를 뽑아오는 함수 \r\ndef getRatingInfo_Naver(bsObj):\r\n movieRatings = bsObj.select(\"div.star_score\")\r\n #RatingList saves the all rating information\r\n #[관람객 평점, 기자평론가 평점, 네티즌 평점]\r\n RatingList = []\r\n \r\n #movie page에서 별점 정보를 뽑아온다. 
\r\n    for i, ratings in enumerate(movieRatings):\r\n        #it keeps only the first three scores\r\n        if i < 3:\r\n            movieScore = ratings.select(\"em\")\r\n            Score = \"\"\r\n            for rating in movieScore:\r\n                Score += rating.string\r\n            RatingList.append(Score)\r\n    \r\n    return RatingList\r\n\r\ndef clean_text(text):\r\n    cleaned_text = re.sub('[\\{\\}\\[\\]\\/?.,;:|\\)*~`!^\\-_+<>@\\#$%&\\\\\\=\\(\\'\\\"]',\r\n                          '', text)\r\n    cleaned_text = cleaned_text.strip()\r\n    return cleaned_text\r\n\r\n\r\n\r\ndef decodeSearch(searchTitle):\r\n    query = {\r\n        'query' : searchTitle\r\n    }\r\n    \r\n    search = parse.urlencode(query, encoding='utf-8', doseq=True)\r\n    \r\n    return search\r\n\r\ndef getMoviepage_Naver(searchTitle):\r\n    search = decodeSearch(searchTitle)\r\n    \r\n    baseUrl = \"https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&\"\r\n    \r\n    searchedPage = baseUrl + search\r\n    \r\n    bsObj = setbsObj(searchedPage)\r\n    \r\n\r\n    moviePage = bsObj.select_one(\"h3 > a\").attrs['href']\r\n    \r\n    return moviePage\r\n\r\n# Function that returns the movie info from Naver Movies (as a dict) \r\ndef getMovieInfo_Naver(searchTitle):\r\n    movieInfo = {}\r\n    \r\n    movieUrl = getMoviepage_Naver(searchTitle)\r\n    bsObj = setbsObj(movieUrl)\r\n    \r\n    movieTitle = bsObj.select_one(\"h3.h_movie > a\").string\r\n    \r\n    movieInfo['title'] = movieTitle\r\n    \r\n    RatingList = getRatingInfo_Naver(bsObj)\r\n    \r\n    movieInfo['rating'] = RatingList\r\n    \r\n    movieType = bsObj.select(\"dl.info_spec > dd\")\r\n    \r\n    actor = []\r\n    genre = []\r\n    openday = []\r\n    \r\n    for i, Info in enumerate(movieType):\r\n        for element in Info.select(\"a\"):\r\n            if i==0 and element.attrs['href'].startswith('/movie/sdb/browsing/bmovie.nhn?genre'):\r\n                genre.append(element.string)\r\n            elif i==1 and element.attrs['href'].startswith('/movie/bi/pi/basic'):\r\n                movieInfo['director'] = element.string\r\n            elif i==2 and element.attrs['href'].startswith('/movie/bi/pi/basic'):\r\n                actor.append(element.string)\r\n            elif i==3 and element.attrs['href'].startswith('/movie/sdb/browsing/bmovie.nhn?grade'):\r\n                movieInfo['age'] = element.string.split('세')[0]\r\n            elif element.attrs['href'].startswith('/movie/sdb/browsing/bmovie.nhn?open='):\r\n                openday.append(element.string)\r\n    \r\n    opendate = None\r\n    if len(openday) >=2: \r\n        date = openday[1].replace('.', ' ').strip().replace(' ', '-')\r\n        opendate = openday[0].strip() + '-' + date\r\n    \r\n    if opendate :\r\n        movieInfo['opendate'] = opendate\r\n    \r\n    movieInfo['actor'] = actor\r\n    movieInfo['genre'] = genre\r\n    \r\n    for Info in movieType:\r\n        for element in Info.select(\"p.count\"):\r\n            movieInfo['audience'] = element.getText().split(\"명\")[0]\r\n    \r\n    reviews = naverMovieReview(searchTitle)\r\n    \r\n    movieInfo['reviews'] = reviews\r\n    \r\n    twit_num_comments = findingNumofCommentsInTwits(movieInfo['opendate'], searchTitle)\r\n\r\n    movieInfo['twit_comments'] = twit_num_comments\r\n    \r\n    return movieInfo\r\n\r\ndef naverMovieReview(searchTitle):\r\n    codeUrl = getMoviepage_Naver(searchTitle).split('?')[1]\r\n    \r\n    baseUrl = \"https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?\" + \\\r\n        codeUrl + \"&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=\"\r\n    \r\n    reviews = [] \r\n    \r\n    for i in range(10):\r\n        bsObj = setbsObj(baseUrl + str(i+1))\r\n        movieReviews = bsObj.select(\".score_reple > p\")\r\n        for review in movieReviews:\r\n            reviews.append(clean_text(review.getText()))\r\n    \r\n    # TODO: pick the 10 most frequent words from the collected reviews, save them in reviews, and store them in movie_info. 
\r\n return reviews\r\n\r\ndef naverMovieRanking():\r\n url = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=cnt&date='\r\n dt = datetime.datetime.now()\r\n \r\n if dt.month < 10 : \r\n month = \"0\" + str(dt.month)\r\n else:\r\n month = str(dt.month)\r\n \r\n if dt.day < 10:\r\n day = \"0\" + str(dt.day)\r\n else:\r\n day = str(dt.day)\r\n \r\n today =\"\"+ str(dt.year) + str(month) + str(day)\r\n \r\n url = url + today\r\n \r\n bsObj = setbsObj(url)\r\n \r\n Ranking = bsObj.select(\"div.tit3 > a\")\r\n \r\n movieRanking = []\r\n \r\n for element in Ranking:\r\n movieRanking.append(element.string)\r\n \r\n return movieRanking\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n #movieData = []\r\n \r\n #movieRanking = naverMovieRanking()\r\n \r\n #for movie in movieRanking:\r\n # movieData.append(getMovieInfo_Naver(movie))\r\n \r\n #print(movieData)\r\n \r\n movieInfo = getMovieInfo_Naver(\"기생충\")\r\n print(movieInfo)\r\n \r\n \r\n \r\n \r\n \r\n ", "sub_path": "movie_scraper/movie_scraping.py", "file_name": "movie_scraping.py", "file_ext": "py", "file_size_in_byte": 5719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "urllib.request.urlopen", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 12, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 37, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 49, "usage_type": "name"}, {"api_name": "twitercollect.findingNumofCommentsInTwits", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 145, "usage_type": "attribute"}]} +{"seq_id": "52856932", "text": "import sys\nimport os.path as op\n\nimport argparse\n\nfrom ..build import build_book\nfrom ..page import build_page\n\nDESCRIPTION = (\"Convert a collection of Jupyter Notebooks into HTML \"\n \"suitable for a course textbook.\")\n\nDESCRIPTION_PAGE = (\"Convert a single Jupyter Notebook into HTML.\")\n\n\ndef build():\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument(\n \"path_book\", help=\"Path to the root of the book repository.\")\n parser.add_argument(\"--template\", default=None,\n help=\"Path to the template nbconvert uses\"\n \" to build markdown files\")\n parser.add_argument(\"--config\", default=None,\n help=\"Path to the Jekyll configuration file\")\n parser.add_argument(\"--toc\", default=None,\n help=\"Path to the Table of Contents YAML file\")\n parser.add_argument(\"--overwrite\", action='store_true',\n help=\"Overwrite md files if they already exist.\")\n parser.add_argument(\"--execute\", action='store_true',\n help=\"Execute notebooks before converting them.\")\n parser.add_argument(\"--local-build\", action='store_true',\n help=\"Specify you are building site locally\"\n \" for later upload.\")\n parser.set_defaults(overwrite=False, execute=False)\n\n ###############################################\n # Default values and arguments\n\n args = parser.parse_args(sys.argv[2:])\n overwrite = bool(args.overwrite)\n execute = bool(args.execute)\n\n # Paths for our notebooks\n PATH_BOOK = op.abspath(args.path_book)\n\n PATH_TOC_YAML = args.toc if args.toc is not None else op.join(\n PATH_BOOK, '_data', 'toc.yml')\n CONFIG_FILE = 
args.config if args.config is not None else op.join(\n PATH_BOOK, '_config.yml')\n PATH_TEMPLATE = args.template if args.template is not None else op.join(\n PATH_BOOK, 'scripts', 'templates', 'html.tpl')\n\n local_build = args.local_build\n\n build_book(PATH_BOOK, PATH_TOC_YAML, CONFIG_FILE,\n PATH_TEMPLATE, local_build, execute, overwrite)\n\n\ndef page():\n parser = argparse.ArgumentParser(description=DESCRIPTION_PAGE)\n parser.add_argument(\n \"path_ntbk\", help=\"Path to the notebook you'll convert.\")\n parser.add_argument(\n \"path_html_output\", help=\"Path to the folder where HTML will be placed.\")\n parser.add_argument(\"template\", help=\"Path to a template to render the HTML\")\n parser.add_argument(\"--path_media_output\", default=None,\n help=\"The path to where images should be extracted\")\n parser.add_argument(\"--execute\", action='store_true', help=\"Execute the notebook before converting\")\n parser.set_defaults(execute=False)\n\n ###############################################\n # Default values and arguments\n\n args = parser.parse_args(sys.argv[2:])\n execute = bool(args.execute)\n\n # Paths for our notebooks\n PATH_PAGE = op.abspath(args.path_ntbk)\n PATH_HTML_OUTPUT = op.abspath(args.path_html_output)\n PATH_MEDIA_OUTPUT = args.path_media_output if args.path_media_output is not None else PATH_HTML_OUTPUT\n\n # Choose the template we'll use\n path_template = op.abspath(args.template)\n\n build_page(PATH_PAGE, PATH_HTML_OUTPUT, PATH_MEDIA_OUTPUT,\n execute, path_template)\n", "sub_path": "jupyter_book/commands/build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 3398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "name"}, {"api_name": "build.build_book", "line_number": 54, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "name"}, {"api_name": "page.build_page", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "484029908", "text": "import aiohttp\nimport discord\nfrom bs4 import BeautifulSoup, Comment\nfrom discord.ext import commands\nfrom .utils import checks\nimport io\nimport json\nimport random\n\nwith open(\"C:/DISCORD BOT/BladeAndSoul/bnstext.txt\") as j:\n\tbnsurl = j.read()\nwith open(\"C:/DISCORD BOT/BladeAndSoul/people.json\") as j:\n\tbns_people = 
json.load(j)\n\nclass bladeandsoul:\n\t\"\"\"\n\tBlade and Soul related commands.\n\tIn the future, if possible, rework the save-build functions to make them more useful.\n\t\"\"\"\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t@commands.command(aliases=[\"bnsEU\",\"BNSEU\",\"BNSeu\"])\n\t@checks.not_lounge()\n\tasync def bnseu(self, ctx, *, person : str = None):\n\t\tawait self.bns(ctx, person, \"eu\")\n\n\t@commands.command(aliases=[\"bnsNA\",\"BNSNA\",\"BNSna\",\"bns\",\"BNS\",\"Bns\"])#default region is NA\n\t@checks.not_lounge()\n\tasync def bnsna(self, ctx, *, person : str = None):\n\t\tawait self.bns(ctx, person, \"na\")\n\n\tdef bnscolor(self, classname): #can just have a json with key:value pairs for this\n\t\tif classname == 'Blade Master':\n\t\t\treturn [16718105, \"🔥: {p[fire]}({p[firep]}%)\\n⚡: {p[light]}({p[lightp]}%)\"]\n\t\tif classname == 'Kung Fu Master':\n\t\t\treturn [3325695, \"🔥: {p[fire]}({p[firep]}%)\\n💨: {p[wind]}({p[windp]}%)\"]\n\t\tif classname == 'Assassin':\n\t\t\treturn [2123412, \"🌙: {p[shadow]}({p[shadowp]}%)\\n⚡: {p[light]}({p[lightp]}%)\"]\n\t\tif classname == 'Destroyer':\n\t\t\treturn [10038562, \"🌙: {p[shadow]}({p[shadowp]}%)\\n⛰: {p[earth]}({p[earthp]}%)\"]\n\t\tif classname == 'Blade Dancer':\n\t\t\treturn [7419530, \"⚡: {p[light]}({p[lightp]}%)\\n💨: {p[wind]}({p[windp]}%)\"]\n\t\tif classname == 'Soul Fighter':\n\t\t\treturn [3066993, \"❄: {p[frost]}({p[frostp]}%)\\n⛰: {p[earth]}({p[earthp]}%)\"]\n\t\tif classname == 'Warlock':\n\t\t\treturn [15620599, \"❄: {p[frost]}({p[frostp]}%)\\n🌙: {p[shadow]}({p[shadowp]}%)\"]\n\t\tif classname == 'Force Master':\n\t\t\treturn [15105570, \"❄: {p[frost]}({p[frostp]}%)\\n🔥: {p[fire]}({p[firep]}%)\"]\n\t\tif classname == 'Summoner':\n\t\t\treturn [15844367, \"💨: {p[wind]}({p[windp]}%) \\n⛰: {p[earth]}({p[earthp]}%)\"]\n\t\tif classname == 'Gunslinger':\n\t\t\treturn [0xffa500, \"🔥: {p[fire]}({p[firep]}%)\\n🌙: {p[shadow]}({p[shadowp]}%)\"]\n\t\tif classname == 'Warden':\n\t\t\treturn [0x800020, \"⚡: {p[light]}({p[lightp]}%)\\n❄: {p[frost]}({p[frostp]}%)\"]\n\t\treturn [0,\"Element not known for this class\"]\n\n\tasync def bns(self, ctx, person, region):\n\t\tif person is None:\n\t\t\tif str(ctx.message.author.id) in bns_people:\n\t\t\t\tperson = bns_people[str(ctx.message.author.id)][\"ign\"]\n\t\t\t\tregion = bns_people[str(ctx.message.author.id)][\"region\"]\n\t\t\telse:\n\t\t\t\tawait ctx.send('the format for seeing a player\\'s bns info is \\'!bns (player ign)\\'')\n\t\t\t\treturn\n\t\tnewerM = person.lower()\n\t\tif len(newerM.split()) > 1:\n\t\t\tnewestM = '%20'.join(newerM.split())\n\t\telse:\n\t\t\tnewestM = newerM\n\t\tif \"faggot\" in newestM.lower():\n\t\t\tawait ctx.send('http://na-bns.ncsoft.com/ingame/bs/character/profile?c=Rain\\nhttp://na-bns.ncsoft.com/ingame/bs/character/profile?c=Minko')\n\t\tlink = \"http://{}-bns.ncsoft.com/ingame/bs/character/data/abilities.json?c={}\".format(region,newestM)\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(link) as r:\n\t\t\t\t#await ctx.send(\"currently working on the bns command since NCsoft changed the site\")\n\t\t\t\tif r.status == 400:\n\t\t\t\t\tawait ctx.send(\"For some reason the BNS website returned a status code of `400` for {}. 
Most likely due to Elemental Accessories.\".format(person))\n\t\t\t\t\treturn\n\t\t\t\tif r.status == 200:\n\t\t\t\t\t#if \"unavailable\" in await r.text():\n\t\t\t\t\t#\tawait ctx.send(\"NCSoft says this character information is unavailable\\n\"+link+'{}&s=101'.format(newestM))\n\t\t\t\t\t#\treturn\n\n\t\t\t\t\tall_stats = await r.json()\n\t\t\t\t\tif all_stats[\"result\"] != \"success\":\n\t\t\t\t\t\tawait ctx.send(\"NCSoft says this character information is unavailable\\nhttp://{}-bns.ncsoft.com/ingame/bs/character/profile?c={}\".format(region, newestM))\n\t\t\t\t\t\treturn\n\n\t\t\t\t\tstat = all_stats[\"records\"][\"total_ability\"]\n\t\t\t\t\tHMstat = all_stats[\"records\"][\"point_ability\"]\n\t\t\t\t\t\n\t\t\t\t\tsLink = \"http://{}-bns.ncsoft.com/ingame/bs/character/profile?c={}\".format(region, newestM)\n\t\t\t\t\tasync with aiohttp.ClientSession() as session:\n\t\t\t\t\t\tasync with session.get(sLink) as r:\n\t\t\t\t\t\t\tsoup = BeautifulSoup(await r.text(), 'html.parser')\n\t\t\t\t\tsig = soup.find_all(attrs={\"class\":\"signature\"})\n\t\t\t\t\ttry:\n\t\t\t\t\t\tclan = sig[0].find_all(attrs={\"class\":\"guild\"})[0].text\n\t\t\t\t\texcept:\n\t\t\t\t\t\tclan = 'None'\n\t\t\t\t\tclassname = sig[0].find_all(\"ul\")[0].li.string\n\t\t\t\t\tserver = sig[0].find_all(\"ul\")[0].find_all(\"li\")[2].string\n\t\t\t\t\tlevel = sig[0].find_all(\"li\")[1].text.split()[1]\n\t\t\t\t\ttry:\n\t\t\t\t\t\thmlevel = sig[0].find_all(\"li\")[1].find_all(attrs={\"class\":\"masteryLv\"})[0].string.replace(\"HongmoonLevel\", \"**HM:**\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\thmlevel = \"**HM:** 0\"\n\t\t\t\t\tclassicon = soup.find_all(\"div\", class_=\"classThumb\")[0].img['src']\n\t\t\t\t\tname = \"{}{}\".format(soup.find_all(\"a\", href=\"#\")[0].string, soup.find_all(\"span\", attrs={'class':\"name\"})[0].string)\n\n\t\t\t\t\t#HM stat stuff\n\t\t\t\t\thmA = HMstat[\"offense_point\"]\n\t\t\t\t\thmD = HMstat[\"defense_point\"]\n\n\t\t\t\t\t#ATTACK STATS\n\t\t\t\t\tatt = stat[\"attack_power_value\"]\n\t\t\t\t\tpierce = stat[\"attack_pierce_value\"]\n\t\t\t\t\tpiercep = stat[\"attack_defend_pierce_rate\"]\n\t\t\t\t\tacc = stat[\"attack_hit_value\"]\n\t\t\t\t\taccp = stat[\"attack_hit_rate\"]\n\t\t\t\t\tchit = stat[\"attack_critical_value\"]\n\t\t\t\t\tchitp = stat[\"attack_critical_rate\"]\n\t\t\t\t\tcdmg = stat[\"attack_critical_damage_value\"]\n\t\t\t\t\tcdmgp = stat[\"attack_critical_damage_rate\"]\n\n\t\t\t\t\t#DEFENSE STATS\n\t\t\t\t\thp = int(stat[\"max_hp\"])\n\t\t\t\t\tdefense = stat[\"defend_power_value\"]\n\t\t\t\t\tdefensep = stat[\"defend_physical_damage_reduce_rate\"]\n\t\t\t\t\teva = stat[\"defend_dodge_value\"]\n\t\t\t\t\tevap = stat[\"defend_dodge_rate\"]\n\t\t\t\t\tblock = stat[\"defend_parry_value\"]\n\t\t\t\t\tblockp = stat[\"defend_parry_rate\"]\n\t\t\t\t\tcritd = stat[\"defend_critical_value\"]\n\t\t\t\t\tcritdp = stat[\"defend_critical_rate\"] #original repeated defend_critical_value; the _rate key is assumed by analogy with the other *_rate stats above\n\n\t\t\t\t\t#ELE DAMAGE\n\t\t\t\t\teles = 
{\n\t\t\t\t\t\t\"fire\":stat[\"attack_attribute_fire_value\"],\n\t\t\t\t\t\t\"firep\":stat[\"attack_attribute_fire_rate\"],\n\t\t\t\t\t\t\"frost\":stat[\"attack_attribute_ice_value\"],\n\t\t\t\t\t\t\"frostp\":stat[\"attack_attribute_ice_rate\"],\n\t\t\t\t\t\t\"wind\":stat[\"attack_attribute_wind_value\"],\n\t\t\t\t\t\t\"windp\":stat[\"attack_attribute_wind_rate\"],\n\t\t\t\t\t\t\"earth\":stat[\"attack_attribute_earth_value\"],\n\t\t\t\t\t\t\"earthp\":stat[\"attack_attribute_earth_rate\"],\n\t\t\t\t\t\t\"light\":stat[\"attack_attribute_lightning_value\"],\n\t\t\t\t\t\t\"lightp\":stat[\"attack_attribute_lightning_rate\"],\n\t\t\t\t\t\t\"shadow\":stat[\"attack_attribute_void_value\"],\n\t\t\t\t\t\t\"shadowp\":stat[\"attack_attribute_void_rate\"]\n\t\t\t\t\t}\n\n\t\t\t\t\t#EMBED stuff\n\t\t\t\t\tlft = \"**Attack:** {} \\⭐ {}P\\n**Pierce:** {}({}%)\\n**Accuracy:** {}({}%)\\n**Critical Hit:** {}({}%)\\n**Critical Damage** {}({}%)\".format(att,hmA,pierce,piercep,acc,accp,chit,chitp,cdmg,cdmgp)\n\t\t\t\t\trgt = \"**HP:** {} \\⭐ {}P\\n**Defense:** {}({}%)\\n**Evasion:** {}({}%)\\n**Block:** {}({}%)\\n**Crit Defense:** {}({}%)\\n\".format(hp,hmD,defense,defensep,eva,evap,block,blockp,critd,critdp)\n\t\t\t\t\tembed = discord.Embed()\n\t\t\t\t\tembed.set_author(name=classname, icon_url=classicon)\n\t\t\t\t\tembed.title = name\n\t\t\t\t\tembed.url = sLink\n\t\t\t\t\tcl = self.bnscolor(classname)\n\t\t\t\t\tembed.color = cl[0]\n\t\t\t\t\tembed.add_field(name=\"__General Info__\", value=\"**Server:** {}\\n**Clan:** {}\\n**Level:** {} \\⭐ {}\".format(server,clan,level,hmlevel))\n\t\t\t\t\tembed.add_field(name=\"__Elemental Damage__\", value=cl[1].format(p=eles))\n\t\t\t\t\tembed.add_field(name=\"__Offensive__\", value=lft)\n\t\t\t\t\tembed.add_field(name=\"__Defensive__\", value=rgt)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tasync with aiohttp.ClientSession() as session:\n\t\t\t\t\t\t\tasync with session.get(\"http://{}-bns.ncsoft.com/ingame/bs/character/data/equipments?c={}\".format(region,newestM)) as r:\n\t\t\t\t\t\t\t\tsoup2 = BeautifulSoup(await r.text(), 'html.parser')\n\t\t\t\t\t\t#\t\tgear = await r.json()\n\t\t\t\t\t\t#weap = gear[\"equipments\"][\"hand\"][\"equip\"][\"item\"][\"icon\"]\n\t\t\t\t\t\tweap = soup2.find_all(\"div\", class_=\"wrapWeapon\")[0].find_all(\"p\", class_=\"thumb\")[0].img['src']\n\t\t\t\t\t\tembed.set_thumbnail(url=weap)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(e)\n\t\t\t\t\t\tembed.set_thumbnail(url=\"http://i.imgur.com/yfzrHiy.png\")\n\t\t\t\t\tif newestM == \"rezorector\" or newestM == \"not%20rezo\":\n\t\t\t\t\t\tembed.set_image(url=\"https://cdn.discordapp.com/attachments/204813384888090626/307026647209476101/rezgay.png\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tembed.set_image(url=soup.find_all(\"div\", class_=\"charaterView\")[0].img['src']+\"?=\"+str(random.randint(0,5000)))\n\t\t\t\t\tembed.set_footer(text='Blade and Soul', icon_url='http://i.imgur.com/a1kk9Tq.png')\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif int(att) >= 1350:\n\t\t\t\t\t\t\tembed.add_field(name='​', value=\"​\", inline=False)#dummy zero width character field, use this to move the fields around\n\t\t\t\t\t\t\tembed.set_footer(text=\"Whale and Soul\", icon_url=\"http://i.imgur.com/T6MP5xX.png\")\n\t\t\t\t\t\tm = await ctx.send(embed=embed)\n\t\t\t\t\t\tif int(att) >= 1350:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tembed.set_footer(text=\"Whale and Soul\", icon_url=\"http://i.imgur.com/T6MP5xX.png\")\n\t\t\t\t\t\t\t\tawait m.add_reaction(\"🐋\")\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\texcept:#this is a lazy way 
to check for embeds, since this would also catch other errors\n\t\t\t\t\t\tawait ctx.send(\"Bot needs embed permissions to display BNS stats\")\n\t\t\t\telse:\n\t\t\t\t\tawait ctx.send('Character name does not exist')\n\n\t@commands.command(aliases=[\"bnsl\"])\n\tasync def bnslookup(self, ctx, *, member : discord.Member = None):\n\t\tif member is None:\n\t\t\tif str(ctx.message.author.id) in bns_people:\n\t\t\t\tmember = ctx.message.author\n\t\t\telse:\n\t\t\t\tawait ctx.send(\"Write or mention the name of a person in order to look them up!\")\n\t\t\t\treturn\n\t\tp = bns_people.get(str(member.id))\n\t\tif p is None:\n\t\t\tawait ctx.send(\"That person is not in the database\")\n\t\telse:\n\t\t\tregions = {\n\t\t\t\t\"na\":\"North America 💵\",\n\t\t\t\t\"eu\":\"Europe 💶\"\n\t\t\t}\n\t\t\tnewerM = bns_people[str(member.id)][\"ign\"].lower()\n\t\t\tif len(newerM.split()) > 1:\n\t\t\t\tnewestM = '%20'.join(newerM.split())\n\t\t\telse:\n\t\t\t\tnewestM = newerM\n\t\t\tlink = \"http://{}-bns.ncsoft.com/ingame/bs/character/profile?c={}\".format(bns_people[str(member.id)][\"region\"],newestM)\n\t\t\tembed = discord.Embed()\n\t\t\tembed.color = 0xFF0000\n\t\t\tembed.set_footer(text=\"Not Verified! Type !verify to see.\",icon_url=\"http://i.imgur.com/6bdro4H.png\")\n\t\t\tembed.url = link\n\t\t\tv = \"Not Verified ❌\"\n\t\t\tif p.get(\"verif\") == 1:\n\t\t\t\tv = \"Verified ☑\"\n\t\t\t\tembed.color = 0x0066CC\n\t\t\t\tembed.set_footer(text=\"Verified!\",icon_url=\"http://i.imgur.com/Ti6nrz3.png\")\n\t\t\tembed.set_author(name=str(member),icon_url=member.avatar_url.replace(\"gif\",\"png\"))\n\t\t\tembed.add_field(name=\"__Character name:__\",value=p[\"ign\"])\n\t\t\tembed.add_field(name=\"__BNS Account Name:__\",value=p[\"acc\"])\n\t\t\tembed.add_field(name=\"__Region:__\",value=regions[p[\"region\"].lower()])\n\t\t\tawait ctx.send(embed=embed)\n\n\t@commands.command(brief=\"This command tells you how you can get verified\")\n\tasync def verify(self, ctx, *,id : str = None):\n\t\tif ctx.message.author.id == 90886475373109248 and id is not None:\n\t\t\ttry:\n\t\t\t\tbns_people[id][\"verif\"] = 1\n\t\t\t\twith open('C:/DISCORD BOT/BladeAndSoul/people.json', 'w') as f:\n\t\t\t\t\tjson.dump(bns_people, f, indent = 4)\n\t\t\t\t\tawait ctx.send(\"Account has been verified!☑\")\n\t\t\texcept:\n\t\t\t\tawait ctx.send(\"Verification failed!❌\")\n\t\t\treturn\n\t\tembed = discord.Embed()\n\t\tembed.description = \"In order to get verified with `!bnslookup`, **you will need to contact Comphus#4981 with proof**. Sending a screenshot of you timestamping ingame with your char name next to it, a timestamp somewhere, and the words \\\"BASEDBOT\\\" shown ingame should be fine.\\n\\n__**Do not**__ friend request Comphus#4981 as he already has too many requests and doesnt know who is who.\\n**If you do not have a way to contact him, you may [join the bot server by clicking here to get in contact](https://discord.gg/Gvt3Ks8)**\"\n\t\tembed.color = 0x0066CC\n\t\tawait ctx.send(embed=embed)\n\n\t@commands.command(aliases=[\"bnss\"])\n\tasync def bnssave(self, ctx, region : str = None, *,person : str = None):\n\t\tif region is None:\n\t\t\tawait ctx.send(\"In order to use `!bnssave`, you must provide a region and a main character to save like so `!bnssave region yourchar`, where region is either na or eu.\\nOnce saved, you can use `!bns` or `!bnspvp` without having to use your name to pull up the info with the character you saved. In order to remove yourself from the list, type `!bnssave remove`. 
**This can be used to verify people with `!bnslookup` or aliased `!bnsl` to prevent identity fraud and such.** Type !verify to find out how to get verified\\n**If you think someone stole your name, contact Comphus#4981 with !verify**\")\n\t\t\treturn\n\t\tif region.lower() in (\"remove\",\"reset\"):\n\t\t\tif bns_people.get(str(ctx.message.author.id)) is not None:\n\t\t\t\tdel bns_people[str(ctx.message.author.id)]\n\t\t\t\twith open('C:/DISCORD BOT/BladeAndSoul/people.json', 'w') as f:\n\t\t\t\t\tjson.dump(bns_people, f, indent = 4)\n\t\t\t\t\tawait ctx.send(\"You have removed yourself from the list.\")\n\t\t\telse:\n\t\t\t\tawait ctx.send(\"You are not registered\")\n\t\t\treturn\n\t\tif person is None:\n\t\t\tawait ctx.send(\"Must provide a character to save/edit\")\n\t\t\treturn\n\t\tif region.lower() == \"edit\":\n\t\t\tid = str(ctx.message.author.id)\n\t\t\tif bns_people.get(id) is None:\n\t\t\t\tawait ctx.send(\"You are not registered.\")\n\t\t\t\treturn\n\t\t\tnewerM = person.lower()\n\t\t\tif len(newerM.split()) > 1:\n\t\t\t\tnewestM = '%20'.join(newerM.split())\n\t\t\telse:\n\t\t\t\tnewestM = newerM\n\t\t\tlink = \"http://{}-bns.ncsoft.com/ingame/bs/character/profile?c={}\".format(bns_people[id][\"region\"],newestM)\n\t\t\tprint(link)\n\t\t\tasync with aiohttp.ClientSession() as session:\n\t\t\t\tasync with session.get(link) as r:\n\t\t\t\t\tif r.status != 200:\n\t\t\t\t\t\tawait ctx.send(\"I could not get the info from the BNS website\")\n\t\t\t\t\t\treturn\n\t\t\t\t\tsoup = BeautifulSoup(await r.text(), 'html.parser')\n\t\t\tacc = soup.find_all(\"a\", href=\"#\")[0].string\n\t\t\tname = soup.find_all(\"span\", attrs={'class':\"name\"})[0].string\n\t\t\tif acc != bns_people[id][\"acc\"]:\n\t\t\t\tawait ctx.send(\"The account names do not match\")\n\t\t\t\treturn\n\t\t\tbns_people[id][\"ign\"] = name[1:-1]\n\t\t\twith open('C:/DISCORD BOT/BladeAndSoul/people.json', 'w') as f:\n\t\t\t\tjson.dump(bns_people, f, indent = 4)\n\t\t\t\tawait ctx.send(\"Character successfully edited\")\n\t\t\treturn\n\n\t\tif region.lower() not in (\"na\",\"eu\"):\n\t\t\tawait ctx.send(\"Invalid region, I can only save NA or EU\")\n\t\t\treturn\n\t\ttry:\n\t\t\tnewerM = person.lower()\n\t\t\tif len(newerM.split()) > 1:\n\t\t\t\tnewestM = '%20'.join(newerM.split())\n\t\t\telse:\n\t\t\t\tnewestM = newerM\n\t\t\tasync with aiohttp.ClientSession() as session:\n\t\t\t\tasync with session.get(\"http://{}-bns.ncsoft.com/ingame/bs/character/profile?c={}\".format(region,newestM)) as r:\n\t\t\t\t\tsoup = BeautifulSoup(await r.text(), 'html.parser')\n\t\t\tid = str(ctx.message.author.id)\n\t\t\tacc = soup.find_all(\"a\", href=\"#\")[0].string\n\t\t\tname = soup.find_all(\"span\", attrs={'class':\"name\"})[0].string\n\t\t\tif bns_people.get(id) is not None:\n\t\t\t\tawait ctx.send(\"Your discord id is already registered.\")\n\t\t\t\treturn\n\t\t\tfor i in bns_people:\n\t\t\t\tif acc in bns_people[i].get(\"acc\"):\n\t\t\t\t\tawait ctx.send(\"That name is already registered\")\n\t\t\t\t\treturn\n\t\t\tbns_people[id] = {\n\t\t\t\t\"acc\":acc,\n\t\t\t\t\"ign\":name[1:-1],\n\t\t\t\t\"region\":region,\n\t\t\t\t\"verif\":0\n\t\t\t}\n\t\t\twith open('C:/DISCORD BOT/BladeAndSoul/people.json', 'w') as f:\n\t\t\t\tjson.dump(bns_people, f, indent = 4)\n\t\t\t\tawait ctx.send(\"Character 
successfully saved\")\n\t\texcept:\n\t\t\tawait ctx.send(\"I could not find the account associated with this character or the website may be down.\")\n\t\t\treturn\n\n\n\t@commands.command(aliases=[\"bnspvpNA\",\"BNSPVPNA\",\"BNSpvpna\",\"BNSPVPna\",\"bnsp\",\"bnspna\",\"BNSP\",\"Bnsp\"])\n\tasync def bnspvp(self, ctx, *, person : str = None):\n\t\tawait self.pvp(ctx, person, \"na\")\n\n\t@commands.command(aliases=[\"bnspvpEU\",\"BNSPVPEU\",\"BNSpvpeu\",\"BNSPVPeu\",\"bnspeu\",\"BNSPEU\",\"Bnspeu\"])\n\tasync def bnspvpeu(self, ctx, *, person : str = None):\n\t\tawait self.pvp(ctx, person, \"eu\")\n\n\tdef rRank(self, rank):\n\t\tif rank >= 2100:\n\t\t\treturn [\"http://i.imgur.com/DUxiI7K.png\",16770919]\n\t\telif rank >= 1900:\n\t\t\treturn [\"http://i.imgur.com/DjMO8dP.png\",6697881]\n\t\telif rank >= 1600:\n\t\t\treturn [\"http://i.imgur.com/zGWrxqx.png\",16766720]\n\t\telif rank >= 1400:\n\t\t\treturn [\"http://i.imgur.com/A7UT4yj.png\",12632256]\n\t\telse:\n\t\t\treturn [\"http://i.imgur.com/GC4KKXH.png\",6700326]\n\t\n\tasync def pvp(self, ctx, person, region):\n\t\tif person is None:\n\t\t\tif str(ctx.message.author.id) in bns_people:\n\t\t\t\tperson = bns_people[str(ctx.message.author.id)][\"ign\"]\n\t\t\t\tregion = bns_people[str(ctx.message.author.id)][\"region\"]\n\t\t\telse:\n\t\t\t\tawait ctx.send('the format for seeing a player\\'s pvp info is \\'!bnspvp (player ign)\\'')\n\t\t\t\treturn\n\t\tnewerM = person.lower()\n\t\tif len(newerM.split()) > 1:\n\t\t\tnewestM = '%20'.join(newerM.split())\n\t\telse:\n\t\t\tnewestM = newerM\n\t\tlink = \"http://{}-bns.ncsoft.com/ingame/bs/character/profile?c={}\".format(region, newestM)\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(link) as r:\n\t\t\t\tif r.status != 200:\n\t\t\t\t\tawait ctx.send(\"Character name does not exist\")\n\t\t\t\t\treturn\n\t\t\t\tsoup = BeautifulSoup(await r.text(), 'html.parser')\n\t\ttry:\n\t\t\tclassicon = soup.find_all(\"div\", class_=\"classThumb\")[0].img['src']\n\t\texcept:\n\t\t\tpass\n\t\tclassname = soup.find_all(attrs={\"class\":\"signature\"})[0].find_all(\"ul\")[0].li.string\n\t\tname = \"{}{}\".format(soup.find_all(\"a\", href=\"#\")[0].string, soup.find_all(\"span\", attrs={'class':\"name\"})[0].string)\n\n\n\t\ttesting1 = soup.find_all(class_=\"characterArea\")[0].find_all(text=lambda text:isinstance(text, Comment))[3]\n\t\tsoup = BeautifulSoup(testing1, 'html.parser')\n\t\tp = soup.find_all(class_=\"season-title\")[0].span.string.replace(\"\\n\",\"\")\n\t\toneP = int(soup.find_all(class_=\"rank-point\")[0].string)\n\t\tif person == \"comphus\":\n\t\t\toneP = 2300\n\t\toneW= soup.find_all(class_=\"win-point\")[0].string\n\t\tthreeP = soup.find_all(class_=\"rank-point\")[1].string\n\t\tthreeW= soup.find_all(class_=\"win-point\")[1].string\n\t\t#await ctx.send(\"{}\\n1v1 rank:{} wins:{}\\n3v3 rank:{} wins{}\".format(p,oneP,oneW,threeP,threeW))\n\n\n\t\tembed = discord.Embed()\n\t\tembed.set_author(name=classname, icon_url=classicon)\n\t\tembed.title = name\n\t\tembed.url = link\n\t\tranked = self.rRank(oneP)\n\t\tembed.set_thumbnail(url=ranked[0])\n\t\tembed.color = ranked[1]\n\t\tembed.add_field(name=\"__Season Games__\",value=p.replace(\"Total \",\" Total games\\n\").replace(\"Wins\",\" Total Wins\"), inline=False)\n\t\tembed.add_field(name=\"1v1 Games\",value=\"Rank:{}\\nWins:{}\".format(oneP,oneW.replace(\"Victories \", \"\")))\n\t\tembed.add_field(name=\"3v3 Games\",value=\"Rank:{}\\nWins:{}\".format(threeP,threeW.replace(\"Victories \", 
\"\")))\n\n\t\tawait ctx.send(embed=embed)\n\n\n\t\t#print(soup.find_all(text=lambda text:isinstance(text, Comment)))\n\n\t@commands.command()\n\tasync def bnstree(self, ctx, *, name : str = None):\n\t\tawait ctx.send(self.bnst(name))\n\n\tdef bnst(self, name):#this needs to be updated and put into a dict but too lazy\n\t\tif name is None:\n\t\t\treturn 'https://bnstree.com/'\n\t\tname = name.lower()\n\t\tif 'blade master' == name or 'bm' == name:\n\t\t\treturn 'https://bnstree.com/classes/blade-master'\n\t\telif 'kfm' == name or 'kungfu master' == name or 'kung fu master' == name or 'kungfumaster' == name or 'kf' == name:\n\t\t\treturn 'https://bnstree.com/classes/kung-fu-master'\n\t\telif 'destroyer' == name or 'des' == name or 'de' == name or 'destro' == name or 'dest' == name:\n\t\t\treturn 'https://bnstree.com/classes/destroyer'\n\t\telif 'force master' == name or 'fm' == name or 'forcemaster' == name or 'force user' == name:\n\t\t\treturn 'https://bnstree.com/classes/force-master'\n\t\telif 'assassin' == name or 'as' == name or 'sin' == name:\n\t\t\treturn 'https://bnstree.com/classes/assassin'\n\t\telif 'summoner' == name or 'su' == name or 'summ' == name or 'sum' == name:\n\t\t\treturn 'https://bnstree.com/classes/summoner'\n\t\telif 'blade dancer' == name or 'bd' == name or 'bladedancer' == name or 'lbm' == name or 'lyn blade master' == name or 'lynblade master' == name or 'lyn blademaster' == name:\n\t\t\treturn 'https://bnstree.com/classes/blade-dancer'\n\t\telif 'warlock' == name or 'wl' == name or 'lock' == name:\n\t\t\treturn 'https://bnstree.com/classes/warlock'\n\t\telif 'soul fighter' == name or 'sf' == name or 'soulfighter' in name or 'chi master' in name or 'chimaster' in name:\n\t\t\treturn 'https://bnstree.com/classes/soul-fighter'\n\t\telif 'gun slinger' == name or 'gs' == name or 'gunslinger' in name or 'gunner' in name:\n\t\t\treturn 'https://bnstree.com/classes/gunslinger'\n\t\telse:\n\t\t\treturn '2nd argument not recognised'\n\n\t@commands.command(aliases=['bnsm','BNSmarket','BNSm',\"smp\",\"SMP\",\"Smp\",\"mp\",\"m\"])\n\t@checks.not_lounge()\n\tasync def bnsmarket(self, ctx, *, item : str = None):\n\t\tif item is None:\n\t\t\tawait ctx.send(\"In order to use the BNS market search function, type in whatever item after you type `!bnsmarket`,`!bnsm` or `!smp` so i can search through for it.\")\n\t\t\treturn\n\t\tnB = \"na\"\n\t\teB = \"eu\"\n\t\tschema = {}\n\t\tBNSschema = \"\"\"{{\n\t\t Market {{\n\t\t na: search(query: \"{}\", region: \"na\", exact: true) {{\n\t\t item {{\n\t\t name\n\t\t icon\n\t\t }}\n\t\t priceData: price {{\n\t\t items\n\t\t }}\n\t\t }}\n\t\t naF: search(query: \"{}\", region: \"na\", exact: false) {{\n\t\t \titem {{\n\t\t name\n\t\t icon\n\t\t }}\n\t\t priceData: price {{\n\t\t items\n\t\t }}\n\t\t }}\n\t\t eu: search(query: \"{}\", region: \"eu\", exact: true) {{\n\t\t priceData: price {{\n\t\t items\n\t\t }}\n\t\t }}\n\t\t euF: search(query: \"{}\", region: \"eu\", exact: false) {{\n\t\t priceData: price {{\n\t\t items\n\t\t }}\n\t\t }}\n\t\t }}\n\t\t}}\"\"\"\n\t\ttry: # can optimize this later\n\t\t\tschema[\"query\"] = BNSschema.format(item,item,item,item)\n\t\t\tasync with aiohttp.ClientSession() as session:\n\t\t\t\tasync with session.post(\"https://api.bnstree.com/graphql\", data=schema) as r:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tNA = await r.json()\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(e)\n\t\t\t\t\t\tawait ctx.send(\"live market website is currently down\")\n\t\t\t\t\t\treturn\n\t\t\t\t\tif r.status != 
200:\n\t\t\t\t\t\tawait ctx.send(\"https://bnstree.com/market returned a {} error\".format(r.status))\n\t\t\t\t\t\treturn\n\t\t\t\t\tif not NA[\"data\"][\"Market\"][nB].get(\"item\"):\n\t\t\t\t\t\tnB = \"naF\"\n\t\t\t\t\t\teB = \"euF\"\n\t\t\t\t\t\tif not NA[\"data\"][\"Market\"][nB].get(\"item\"):\n\t\t\t\t\t\t\tawait ctx.send(\"Sorry, I couldnt find any item relating to `{}`.\".format(item))\n\t\t\t\t\t\t\treturn\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tawait ctx.send(\"live market website is currently down\")\n\t\t\treturn\n\t\tdef getPrice(region):\n\t\t\tpriceIndex = -1\n\t\t\tfreqIndex = 0\n\t\t\twhile freqIndex < 3:#list always contains 3 elements\n\t\t\t\tif not NA[\"data\"][\"Market\"][region][\"priceData\"][freqIndex].get(\"items\"):\n\t\t\t\t\tfreqIndex += 1\n\t\t\t\t\tcontinue\n\t\t\t\telif priceIndex*-1 == len(NA[\"data\"][\"Market\"][region][\"priceData\"][freqIndex][\"items\"]):\n\t\t\t\t\tbreak\n\t\t\t\tif NA[\"data\"][\"Market\"][region][\"priceData\"][freqIndex][\"items\"][priceIndex][1] == 0:\n\t\t\t\t\tpriceIndex += -1\n\t\t\t\telse:\n\t\t\t\t\treturn str(NA[\"data\"][\"Market\"][region][\"priceData\"][freqIndex][\"items\"][priceIndex][1])\n\t\tdef fmtPrice(r):\n\t\t\ttry:\n\t\t\t\tr = r[:-2] + \" \" + r[-2:]\n\t\t\t\tr = r[:-5] + \" \" + r[-5:]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tr = r.split()\n\t\t\twhile len(r) < 3:\n\t\t\t\tr.insert(0,\"00\")\n\t\t\treturn r\n\t\tNAg, NAs, NAc = fmtPrice(getPrice(nB))\n\t\tEUg, EUs, EUc = fmtPrice(getPrice(eB))\n\t\tembed = discord.Embed()\n\t\tembed.set_thumbnail(url=NA[\"data\"][\"Market\"][nB][\"item\"].get(\"icon\", \"http://i.imgur.com/yfzrHiy.png\"))#just in case there isnt an icon\n\t\tembed.set_author(name=\"Blade & Soul\", icon_url=\"http://i.imgur.com/a1kk9Tq.png\")\n\t\tembed.title = NA[\"data\"][\"Market\"][nB][\"item\"][\"name\"]\n\t\tembed.url = \"https://bnstree.com/market\"\n\t\tembed.add_field(name=\"__NA__\", value=\"<:bnsgold:358757497605062676>** {} **<:bnssilver:358757506769747968>** {} **<:bnscopper:358757522321965058>** {}**\".format(NAg,NAs,NAc), inline=False)\n\t\tembed.add_field(name=\"__EU__\", value=\"<:bnsgold:358757497605062676>** {} **<:bnssilver:358757506769747968>** {} **<:bnscopper:358757522321965058>** {}**\".format(EUg,EUs,EUc))\n\t\tembed.color = 3325695\n\t\tembed.set_footer(text=\"BnSTree Market\",icon_url=\"https://i.imgur.com/3onsQoR.png\")\n\t\ttry:#trying incase bot doesnt have embed permissions\n\t\t\tawait ctx.send(embed=embed)\n\t\t\treturn\n\t\texcept Exception as e:\n\t\t\tif \"400\" in str(e):\n\t\t\t\ttry:#incase bot doesnt have permission to send messages\n\t\t\t\t\tawait ctx.send(\"This bot needs `Embed` permissions in order to use this function\")\n\t\t\t\t\treturn\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\tpass\n\n\ndef setup(bot):\n\tbot.add_cog(bladeandsoul(bot))\n", "sub_path": "cogs/bns.py", "file_name": "bns.py", "file_ext": "py", "file_size_in_byte": 24645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.checks.not_lounge", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.checks", "line_number": 24, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 28, "usage_type": "call"}, {"api_name": 
"discord.ext.commands", "line_number": 28, "usage_type": "name"}, {"api_name": "utils.checks.not_lounge", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.checks", "line_number": 29, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 74, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 94, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 96, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 157, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 168, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 170, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 181, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 200, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 221, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 199, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 199, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 242, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 247, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 236, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 236, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 261, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 282, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 287, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 295, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 313, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 315, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 333, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 252, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 252, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 340, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 340, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 344, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 344, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 374, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 379, "usage_type": "call"}, {"api_name": "bs4.Comment", "line_number": 388, "usage_type": "argument"}, {"api_name": "bs4.BeautifulSoup", "line_number": 389, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 400, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 416, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 416, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 490, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 537, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 447, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 447, "usage_type": "name"}, {"api_name": "utils.checks.not_lounge", "line_number": 448, "usage_type": "call"}, {"api_name": "utils.checks", "line_number": 448, "usage_type": "name"}]} +{"seq_id": "153606353", "text": "import numpy as np\nimport cv2\nimport json\nimport sys\nimport 
os\nsys.path.append(\"/container\")\n\nfrom multiprocessing import Pool\n\n# c7 is discarded in this file, import error\n\nimport container1.app.predict as c1\nimport container2.app.predict as c2\nimport container3.app.predict as c3\nimport container4.app.predict as c4\nprint(\"Modules successfully loaded!\")\n\n# helper functions\ndef image_string(image):\n    image_encode = cv2.imencode('.jpg', image)[1]\n    imagelist = image_encode.tolist()\n    image_string = json.dumps(imagelist)\n    return image_string\n\ndef string_image(imagestring):\n    image_list = json.loads(imagestring)\n    arr = np.array(image_list)\n    arr = np.uint8(arr)\n    image = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n    return image\n\ndef run_c1(imstr):\n    result_fa = c1.predict(imstr)\n    print(\"\\nFace Extraction FINISHED\")\n    if result_fa is None:\n        print(\"\\n[INFO] No Person Detected In This Image!\")\n    return result_fa\n\ndef run_c2(imstr):\n    result_drowsiness = c2.predict(imstr)\n    print(\"\\nFacial Point Detection FINISHED\")\n    return result_drowsiness\n\ndef run_c3(imstr):\n    result_hu = c3.predict(imstr)\n    print(\"\\nHuman Segmentation FINISHED\")\n    if result_hu is None: # was the undefined name human_segmentation\n        print(\"\\n[INFO] No Person Detected In This Image!\")\n    return result_hu\n\ndef run_c4(imstr):\n    result_sleep = c4.predict(imstr) # was stock_data.to_json(), an undefined name apparently copied from elsewhere\n    print(\"\\nPrediction using Regression FINISHED\")\n    return result_sleep\n\ndef pipe1(imstr):\n    result = run_c1(imstr)\n    if result is None:\n        return \"No Person!\"\n    drowsiness = run_c2(result)\n    if drowsiness:\n        return \"Drowsiness!\"\n    else:\n        return \"No Drowsiness!\"\n\n\ndef pipe2(imstr):\n    result = run_c3(imstr)\n    if result is None:\n        return \"No Person!\"\n    sleep = run_c4(result)\n    if sleep:\n        return \"Sleeping!\"\n    else:\n        return \"No Sleeping\"\n\ndef run():\n    print(\"\\nStart Detection: \")\n\n    pipe1_result = []\n    pipe2_result = []\n    count = 0\n    for filename in os.listdir(\"/container/part1\"):\n        if count > 20:\n            break\n        count += 1\n        print(filename)\n        imag = cv2.imread(os.path.join(\"/container/part1\", filename)) # os.listdir returns bare file names\n        imgstr = image_string(imag)\n# p = Pool(2)\n# pipe1_result.append(p.apply_async(pipe1, args=(imgstr,)))\n# pipe2_result.append(p.apply_async(pipe2, args=(imgstr,)))\n# p.close()\n# p.join() # p.join() waits for all child processes to finish\n\n    print(\"\\nResult of PIPE1:\")\n    print(pipe1_result)\n    print(\"\\nResult of PIPE2:\")\n    print(pipe2_result)\n\n\nif __name__ == \"__main__\":\n    run()\n", "sub_path": "applications/fatigue_w_proxy/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 29, "usage_type": "attribute"}, {"api_name": "container1.app.predict.predict", "line_number": 33, "usage_type": "call"}, {"api_name": "container1.app.predict", "line_number": 33, "usage_type": "name"}, {"api_name": "container2.app.predict.predict", "line_number": 40, "usage_type": "call"}, {"api_name": "container2.app.predict", "line_number": 40, "usage_type": "name"}, 
{"api_name": "container3.app.predict.predict", "line_number": 45, "usage_type": "call"}, {"api_name": "container3.app.predict", "line_number": 45, "usage_type": "name"}, {"api_name": "container4.app.predict.predict", "line_number": 52, "usage_type": "call"}, {"api_name": "container4.app.predict", "line_number": 52, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "279354170", "text": "import json\nimport logging\nimport glob\nimport time\n\nfrom funcy.seqs import first, second, drop, flatten\nfrom hive.community.roles import get_user_role, privacy_map, permissions, is_permitted\nfrom hive.db.methods import query_one, query, db_last_block\nfrom steem.blockchain import Blockchain\nfrom steem.steemd import Steemd\nfrom steem.utils import parse_time, is_valid_account_name, json_expand\nfrom toolz import partition_all\n\nlog = logging.getLogger(__name__)\n\n\n# core\n# ----\ndef get_account_id(name):\n if is_valid_account_name(name):\n return query_one(\"SELECT id FROM hive_accounts WHERE name = '%s' LIMIT 1\" % name)\n\n\ndef get_post_id_and_depth(author, permlink):\n res = None\n if author:\n res = first(query(\n \"SELECT id, depth FROM hive_posts WHERE author = '%s' AND permlink = '%s'\" % (author, permlink)))\n return res or (None, -1)\n\n\ndef register_accounts(accounts, date):\n for account in set(accounts):\n if not get_account_id(account):\n query(\"INSERT INTO hive_accounts (name, created_at) VALUES ('%s', '%s')\" % (account, date))\n\n\ndef delete_posts(ops):\n for op in ops:\n query(\"UPDATE hive_posts SET is_deleted = 1 WHERE author = '%s' AND permlink = '%s'\" % (\n op['author'], op['permlink']))\n\n\ndef register_posts(ops, date):\n for op in ops:\n is_edit = query_one(\n \"SELECT 1 FROM hive_posts WHERE author = '%s' AND permlink = '%s'\" % (op['author'], op['permlink']))\n if is_edit:\n continue # ignore edits to posts\n\n # this method needs to perform auth checking e.g. 
is op.author authorized to post in op.community?\n community_or_blog = create_post_as(op) or op['author']\n\n if op['parent_author'] == '':\n parent_id = None\n depth = 0\n category = op['parent_permlink']\n else:\n parent_data = first(query(\"SELECT id, depth, category FROM hive_posts WHERE author = '%s' \"\n \"AND permlink = '%s'\" % (op['parent_author'], op['parent_permlink'])))\n parent_id, parent_depth, category = parent_data\n depth = parent_depth + 1\n\n query(\"INSERT INTO hive_posts (parent_id, author, permlink, category, community, depth, created_at) \"\n \"VALUES (%s, '%s', '%s', '%s', '%s', %d, '%s')\" % (\n parent_id or 'NULL', op['author'], op['permlink'], category, community_or_blog, depth, date))\n\n\ndef process_json_follow_op(account, op_json, block_date):\n \"\"\" This method processes any legacy 'follow' plugin ops (follow/mute/clear, reblog) \"\"\"\n if type(op_json) != list:\n return\n if first(op_json) not in ['follow', 'reblog']:\n return\n if not isinstance(second(op_json), dict):\n return\n\n cmd, op_json = op_json # ['follow', {data...}]\n if cmd == 'follow':\n if type(op_json['what']) != list:\n return\n what = first(op_json['what']) or 'clear'\n if what not in ['blog', 'clear', 'ignore']:\n return\n\n follower = op_json['follower']\n following = op_json['following']\n\n if follower != account:\n return # impersonation attempt\n if not all(filter(is_valid_account_name, [follower, following])):\n return\n\n if what == 'clear':\n query(\"DELETE FROM hive_follows WHERE follower = '%s' \"\n \"AND following = '%s' LIMIT 1\" % (follower, following))\n else:\n fields = {'follower': follower, 'following': following,\n 'created_at': block_date, 'is_muted': int(what == 'ignore')}\n query(\"INSERT IGNORE INTO hive_follows (follower, following, created_at, is_muted) \"\n \"VALUES (:follower, :following, :created_at, :is_muted) \"\n \"ON DUPLICATE KEY UPDATE is_muted = :is_muted\", **fields)\n\n elif cmd == 'reblog':\n blogger = op_json['account']\n author = op_json['author']\n permlink = op_json['permlink']\n\n if blogger != account:\n return # impersonation\n if not all(filter(is_valid_account_name, [author, blogger])):\n return\n\n post_id, depth = get_post_id_and_depth(author, permlink)\n\n if depth > 0:\n return # prevent comment reblogs\n\n if 'delete' in op_json and op_json['delete'] == 'delete':\n query(\"DELETE FROM hive_reblogs WHERE account = '%s' AND post_id = %d LIMIT 1\" % (blogger, post_id))\n else:\n query(\"INSERT IGNORE INTO hive_reblogs (account, post_id, created_at) \"\n \"VALUES ('%s', %d, '%s')\" % (blogger, post_id, block_date))\n\n\n# community methods\n# -----------------\ndef process_json_community_op(account, op_json, date):\n cmd_name, cmd_op = op_json # ['flagPost', {community: '', author: '', ...}]\n\n commands = list(flatten(permissions.values()))\n if cmd_name not in commands:\n return\n\n community = cmd_op['community']\n community_exists = is_community(community)\n\n # special case: community creation. TODO: does this require ACTIVE auth? or POSTING will suffice?\n if cmd_name == 'create' and not community_exists:\n if account != community: # only the OWNER may create\n return\n\n ctype = cmd_op['type'] # restricted, open-comment, public\n # INSERT INTO hive_communities (account, name, about, description, lang, is_nsfw, is_private, created_at)\n # VALUES ('%s', '%s', '%s', '%s', '%s', %d, %d, '%s')\" % [account, name, about, description, lang, is_nsfw ? 1 : 0, is_private ? 
1 : 0, block_date]\n # INSERT ADMINS---\n\n # validate permissions\n if not community_exists or not is_permitted(account, community, cmd_name):\n return\n\n # If command references a post, ensure it's valid\n post_id, depth = get_post_id_and_depth(cmd_op.get('author'), cmd_op.get('permlink'))\n if not post_id:\n return\n\n # If command references an account, ensure it's valid\n account_id = get_account_id(cmd_op.get('account'))\n\n # If command references a list of accounts, ensure they are valid\n account_ids = list(map(get_account_id, cmd_op.get('accounts')))\n\n # ADMIN Actions\n # -------------\n if cmd_name == 'add_admins':\n assert account_ids\n # UPDATE hive_members SET is_admin = 1 WHERE account IN (%s) AND community = '%s'\n\n if cmd_name == 'remove_admins':\n assert account_ids\n # todo: validate at least one admin remains!!!\n # UPDATE hive_members SET is_admin = 0 WHERE account IN (%s) AND community = '%s'\n\n if cmd_name == 'add_mods':\n assert account_ids\n # UPDATE hive_members SET is_mod = 1 WHERE account IN (%s) AND community = '%s'\n\n if cmd_name == 'remove_mods':\n assert account_ids\n # UPDATE hive_members SET is_mod = 0 WHERE account IN (%s) AND community = '%s'\n\n # MOD USER Actions\n # ----------------\n if cmd_name == 'update_settings':\n # name, about, description, lang, is_nsfw\n # settings {bg_color, bg_color2, text_color}\n # UPDATE hive_communities SET .... WHERE community = '%s'\n assert account_id\n\n if cmd_name == 'add_posters':\n assert account_ids\n # UPDATE hive_members SET is_approved = 1 WHERE account IN (%s) AND community = '%s'\n\n if cmd_name == 'remove_posters':\n assert account_ids\n # UPDATE hive_members SET is_approved = 0 WHERE account IN (%s) AND community = '%s'\n\n if cmd_name == 'mute_user':\n assert account_id\n # UPDATE hive_members SET is_muted = 1 WHERE account = '%s' AND community = '%s'\n\n if cmd_name == 'unmute_user':\n assert account_id\n # UPDATE hive_members SET is_muted = 0 WHERE account = '%s' AND community = '%s'\n\n if cmd_name == 'set_user_title':\n assert account_id\n # UPDATE hive_members SET title = '%s' WHERE account = '%s' AND community = '%s'\n\n # MOD POST Actions\n # ----------------\n if cmd_name == 'mute_post':\n assert post_id\n # assert all([account_id, post_id])\n # UPDATE hive_posts SET is_muted = 1 WHERE community = '%s' AND author = '%s' AND permlink = '%s'\n\n if cmd_name == 'unmute_post':\n assert post_id\n # UPDATE hive_posts SET is_muted = 0 WHERE community = '%s' AND author = '%s' AND permlink = '%s'\n\n if cmd_name == 'pin_post':\n assert post_id\n # UPDATE hive_posts SET is_pinned = 1 WHERE community = '%s' AND author = '%s' AND permlink = '%s'\n\n if cmd_name == 'unpin_post':\n assert post_id\n # UPDATE hive_posts SET is_pinned = 0 WHERE community = '%s' AND author = '%s' AND permlink = '%s'\n\n # GUEST POST Actions\n # ------------------\n if cmd_name == 'flag_post':\n assert post_id\n # INSERT INTO hive_flags (account, community, author, permlink, comment, created_at) VALUES ()\n\n # track success (TODO: failures as well?)\n # INSERT INTO hive_modlog (account, community, action, created_at) VALUES (account, community, json.inspect, block_date)\n return True\n\n\ndef create_post_as(comment: dict) -> str:\n \"\"\" Given a new Steem post/comment, add it to appropriate community.\n \n For a comment to be valid, these conditions apply:\n - Post must be new (edits don't count)\n - Author is allowed to post in this community (membership & privacy)\n - Author is not muted in this community\n \n \n Args:\n 
comment (dict): Operation with the post to add.\n\n    Returns:\n        name (str): If all conditions apply, community name we're posting into.\n        Otherwise, the author's own name (blog) is returned.\n    \"\"\"\n\n    if comment['json_metadata'] == \"\":\n        return None\n\n    md = None\n    try:\n        md = json.loads(comment['json_metadata'])\n    except Exception:\n        return None\n\n    if not isinstance(md, dict) or 'community' not in md:\n        return None\n\n    author = comment['author']\n    community = md['community']\n    community_props = get_community(community)\n\n    if not community_props:\n        return None\n\n    if is_author_muted(author, community):\n        return None\n\n    privacy = privacy_map[community_props['privacy']]\n    if privacy == 'open':\n        pass\n    elif privacy == 'restricted':\n        # guests cannot create top-level posts in restricted communities\n        if comment['parent_author'] == \"\" and get_user_role(author, community) == 'guest':\n            return None\n    elif privacy == 'closed':\n        # we need at least member permissions to post or comment\n        if get_user_role(author, community) == 'guest':\n            return None\n\n    return community\n\n\ndef get_community(community_name):\n    # sqlalchemy:\n    # q = select([hive_communities]).where(hive_communities.c.account == community_name).limit(1)\n    # conn.execute(q).fetchall()\n    return first(query(\"SELECT * FROM hive_communities WHERE name = '%s' LIMIT 1\" % community_name))\n\n\ndef is_author_muted(author_name: str, community_name: str) -> bool:\n    return get_user_role(author_name, community_name) == 'muted'\n\n\ndef is_community(name: str) -> bool:\n    \"\"\" Given a community name, check if it's a valid community.\"\"\"\n    return bool(get_community(name))\n\n\n# run indexer\n# -----------\ndef process_block(block):\n    date = parse_time(block['timestamp'])\n    block_num = int(block['previous'][:8], base=16) + 1\n    txs = block['transactions']\n\n    # NOTE: currently `prev` tracks the previous block number and this is enforced with a FK constraint.\n    # soon we will have access to prev block hash and current hash in the API return value, we should use this instead.\n    # the FK constraint will then fail if we somehow end up on the wrong side in a fork reorg.\n    query(\"INSERT INTO hive_blocks (num, prev, txs, created_at) \"\n          \"VALUES ('%d', '%d', '%d', '%s')\" % (block_num, block_num - 1, len(txs), date))\n\n    accounts = set()\n    comments = []\n    json_ops = []\n    deleted = []\n    for tx in txs:\n        for operation in tx['operations']:\n            op_type, op = operation\n\n            if op_type == 'pow':\n                accounts.add(op['worker_account'])\n            elif op_type == 'pow2':\n                accounts.add(op['work'][1]['input']['worker_account'])\n            elif op_type in ['account_create', 'account_create_with_delegation']:\n                accounts.add(op['new_account_name'])\n            elif op_type == 'comment':\n                comments.append(op)\n            elif op_type == 'delete_comment':\n                deleted.append(op)\n            elif op_type == 'custom_json':\n                json_ops.append(op)\n\n    register_accounts(accounts, date)  # if an account does not exist, mark it as created in this block\n    register_posts(comments, date)  # if this is a new post, add the entry and validate community param\n    delete_posts(deleted)  # mark hive_posts.is_deleted = 1\n\n    for op in map(json_expand, json_ops):\n        if op['id'] not in ['follow', 'com.steemit.community']:\n            continue\n\n        # we are assuming `required_posting_auths` is always used and length 1.\n        # it may be that some ops will require `required_active_auths` instead\n        # (e.g. 
if we use that route for admin action of acct creation)\n # if op['required_active_auths']:\n # log.warning(\"unexpected active auths: %s\" % op)\n if len(op['required_posting_auths']) != 1:\n log.warning(\"unexpected auths: %s\" % op)\n continue\n\n account = op['required_posting_auths'][0]\n op_json = op['json']\n\n if op['id'] == 'follow':\n if block_num < 6000000 and type(op_json) != list:\n op_json = ['follow', op_json] # legacy compat\n process_json_follow_op(account, op_json, date)\n elif op['id'] == 'com.steemit.community':\n process_json_community_op(account, op_json, date)\n\n\ndef process_blocks(blocks):\n query(\"START TRANSACTION\")\n for block in blocks:\n process_block(block)\n query(\"COMMIT\")\n\n\ndef sync_from_checkpoints():\n last_block = db_last_block()\n\n fn = lambda f: [int(f.split('/')[1].split('.')[0]), f]\n files = map(fn, glob.glob(\"checkpoints/*.json.lst\"))\n files = sorted(files, key = lambda f: f[0])\n\n last_read = 0\n for (num, path) in files:\n if last_block < num:\n print(\"[SYNC] Load {} -- last block: {}\".format(path, last_block))\n skip_lines = last_block - last_read\n sync_from_file(path, skip_lines)\n last_block = num\n last_read = num\n\n\ndef sync_from_file(file_path, skip_lines, chunk_size=250):\n with open(file_path) as f:\n # each line in file represents one block\n # we can skip the blocks we already have\n remaining = drop(skip_lines, f)\n for batch in partition_all(chunk_size, remaining):\n process_blocks(map(json.loads, batch))\n\n\ndef sync_from_steemd():\n s = Steemd()\n\n lbound = db_last_block() + 1\n ubound = s.last_irreversible_block_num\n\n start_num = lbound\n start_time = time.time()\n while lbound < ubound:\n to = min(lbound + 1000, ubound)\n blocks = s.get_blocks_range(lbound, to)\n lbound = to\n process_blocks(blocks)\n\n rate = (lbound - start_num) / (time.time() - start_time)\n print(\"[SYNC] Got block {} ({}/s) {}m remaining\".format(\n to - 1, round(rate, 1), round((ubound-lbound) / rate / 60, 2)))\n\n\ndef listen_steemd():\n b = Blockchain()\n h = b.stream_from(\n start_block=db_last_block() + 1,\n full_blocks=True,\n )\n for block in h:\n num = int(block['previous'][:8], base=16) + 1\n print(\"[LIVE] Got block {} at {} with {} txs\".format(num,\n block['timestamp'], len(block['transactions'])))\n process_blocks([block])\n\n\n# testing\n# -------\ndef run():\n # fast-load checkpoint files\n sync_from_checkpoints()\n # fast-load from steemd\n sync_from_steemd()\n # follow head blocks\n listen_steemd()\n\n\ndef head_state(*args):\n _ = args # JSONRPC injects 4 arguments here\n steemd_head = Steemd().last_irreversible_block_num\n hive_head = db_last_block()\n diff = steemd_head - hive_head\n return dict(steemd=steemd_head, hive=hive_head, diff=diff)\n\n\nif __name__ == '__main__':\n # setup()\n run()\n", "sub_path": "hive/indexer/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 16298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "steem.utils.is_valid_account_name", "line_number": 20, "usage_type": "call"}, {"api_name": "hive.db.methods.query_one", "line_number": 21, "usage_type": "call"}, {"api_name": "funcy.seqs.first", "line_number": 27, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 27, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 35, "usage_type": "call"}, {"api_name": 
"hive.db.methods.query", "line_number": 40, "usage_type": "call"}, {"api_name": "hive.db.methods.query_one", "line_number": 46, "usage_type": "call"}, {"api_name": "funcy.seqs.first", "line_number": 59, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 59, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 64, "usage_type": "call"}, {"api_name": "funcy.seqs.first", "line_number": 73, "usage_type": "call"}, {"api_name": "funcy.seqs.second", "line_number": 75, "usage_type": "call"}, {"api_name": "funcy.seqs.first", "line_number": 82, "usage_type": "call"}, {"api_name": "steem.utils.is_valid_account_name", "line_number": 91, "usage_type": "argument"}, {"api_name": "hive.db.methods.query", "line_number": 95, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 100, "usage_type": "call"}, {"api_name": "steem.utils.is_valid_account_name", "line_number": 111, "usage_type": "argument"}, {"api_name": "hive.db.methods.query", "line_number": 120, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 122, "usage_type": "call"}, {"api_name": "funcy.seqs.flatten", "line_number": 131, "usage_type": "call"}, {"api_name": "hive.community.roles.permissions.values", "line_number": 131, "usage_type": "call"}, {"api_name": "hive.community.roles.permissions", "line_number": 131, "usage_type": "name"}, {"api_name": "hive.community.roles.is_permitted", "line_number": 149, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 262, "usage_type": "call"}, {"api_name": "hive.community.roles.privacy_map", "line_number": 279, "usage_type": "name"}, {"api_name": "hive.community.roles.get_user_role", "line_number": 284, "usage_type": "call"}, {"api_name": "hive.community.roles.get_user_role", "line_number": 288, "usage_type": "call"}, {"api_name": "funcy.seqs.first", "line_number": 298, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 298, "usage_type": "call"}, {"api_name": "hive.community.roles.get_user_role", "line_number": 302, "usage_type": "call"}, {"api_name": "steem.utils.parse_time", "line_number": 313, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 320, "usage_type": "call"}, {"api_name": "steem.utils.json_expand", "line_number": 348, "usage_type": "argument"}, {"api_name": "hive.db.methods.query", "line_number": 373, "usage_type": "call"}, {"api_name": "hive.db.methods.query", "line_number": 376, "usage_type": "call"}, {"api_name": "hive.db.methods.db_last_block", "line_number": 380, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 383, "usage_type": "call"}, {"api_name": "funcy.seqs.drop", "line_number": 400, "usage_type": "call"}, {"api_name": "toolz.partition_all", "line_number": 401, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 402, "usage_type": "attribute"}, {"api_name": "steem.steemd.Steemd", "line_number": 406, "usage_type": "call"}, {"api_name": "hive.db.methods.db_last_block", "line_number": 408, "usage_type": "call"}, {"api_name": "time.time", "line_number": 412, "usage_type": "call"}, {"api_name": "time.time", "line_number": 419, "usage_type": "call"}, {"api_name": "steem.blockchain.Blockchain", "line_number": 425, "usage_type": "call"}, {"api_name": "hive.db.methods.db_last_block", "line_number": 427, "usage_type": "call"}, {"api_name": "steem.steemd.Steemd", "line_number": 450, "usage_type": "call"}, {"api_name": "hive.db.methods.db_last_block", "line_number": 451, "usage_type": "call"}]} +{"seq_id": 
"175676586", "text": "from decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import session_with_proxy, html_to_markdown\nfrom storescraper.categories import TELEVISION, STEREO_SYSTEM, CELL, \\\n REFRIGERATOR, OVEN, AIR_CONDITIONER, WASHING_MACHINE, STOVE, MONITOR\n\n\nclass Jetstereo(Store):\n base_url = 'https://www.jetstereo.com'\n\n @classmethod\n def categories(cls):\n return [\n TELEVISION,\n STEREO_SYSTEM,\n CELL,\n REFRIGERATOR,\n OVEN,\n AIR_CONDITIONER,\n WASHING_MACHINE,\n STOVE,\n MONITOR\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n category_filters = [\n ('tvs', TELEVISION),\n ('audio-portatil', STEREO_SYSTEM),\n ('equipos-de-sonido', STEREO_SYSTEM),\n ('teatros-en-casa', STEREO_SYSTEM),\n ('smartphones', CELL),\n ('refrigeradoras-side-by-side', REFRIGERATOR),\n ('refrigeradoras-french-door', REFRIGERATOR),\n ('refrigeradoras-twin', REFRIGERATOR),\n ('refrigeradora-top-mount', REFRIGERATOR),\n ('microondas', OVEN),\n ('hornos', OVEN),\n ('aire-acondicionado', AIR_CONDITIONER),\n ('twinwash', WASHING_MACHINE),\n ('lavadoras-top-load', WASHING_MACHINE),\n ('lavadora-carga-frontal', WASHING_MACHINE),\n ('secadoras', WASHING_MACHINE),\n ('estufas-electricas', STOVE),\n ('estufas-de-gas', STOVE),\n ('monitores', MONITOR),\n ]\n\n session = session_with_proxy(extra_args)\n product_urls = []\n\n for category_path, local_category in category_filters:\n if local_category != category:\n continue\n\n url = '{}/{}?pv=1000'.format(cls.base_url, category_path)\n soup = BeautifulSoup(session.get(url, verify=False).text,\n 'html.parser')\n containers = soup.findAll('div', 'product-slide-entry')\n\n if not containers:\n raise Exception('Empty category: ' + url)\n\n for container in containers:\n product_title = container.find('a', 'title')\n if 'LG' not in product_title.text.upper():\n continue\n\n product_url = '{}{}'\\\n .format(cls.base_url, container.find('a')['href'])\n product_urls.append(product_url)\n\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n session = session_with_proxy(extra_args)\n data = session.get(url, allow_redirects=False, verify=False)\n\n if data.status_code == 302:\n return []\n\n soup = BeautifulSoup(data.text, 'html.parser')\n sku_container = soup.find('div', 'star')\n\n if not sku_container:\n return []\n\n sku = sku_container.find('h4').text.replace('SKU: ', '').strip()\n name = '{} ({})'\\\n .format(soup.find('div', 'article-container').find('h1').text, sku)\n\n if soup.find('div', 'share-box').find('a', 'add-to-cart-btn'):\n stock = -1\n else:\n stock = 0\n\n price = Decimal(soup.find('div', 'price').find('div', 'current')\n .text.strip().replace('L. 
', '').replace(',', ''))\n\n picture_urls = []\n pictures = soup.findAll('div', 'product-zoom-image')\n\n for picture in pictures:\n picture_url = picture.find('img')['src'].replace(' ', '%20')\n if 'https:' not in picture_url:\n picture_url = '{}{}'.format(cls.base_url, picture_url)\n picture_urls.append(picture_url)\n\n description = html_to_markdown(str(soup.find('ul', 'read-more-wrap')))\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n sku,\n stock,\n price,\n price,\n 'HNL',\n sku=sku,\n picture_urls=picture_urls,\n description=description\n )\n\n return [p]\n", "sub_path": "storescraper/stores/jetstereo.py", "file_name": "jetstereo.py", "file_ext": "py", "file_size_in_byte": 4283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "storescraper.store.Store", "line_number": 12, "usage_type": "name"}, {"api_name": "storescraper.categories.TELEVISION", "line_number": 18, "usage_type": "name"}, {"api_name": "storescraper.categories.STEREO_SYSTEM", "line_number": 19, "usage_type": "name"}, {"api_name": "storescraper.categories.CELL", "line_number": 20, "usage_type": "name"}, {"api_name": "storescraper.categories.REFRIGERATOR", "line_number": 21, "usage_type": "name"}, {"api_name": "storescraper.categories.OVEN", "line_number": 22, "usage_type": "name"}, {"api_name": "storescraper.categories.AIR_CONDITIONER", "line_number": 23, "usage_type": "name"}, {"api_name": "storescraper.categories.WASHING_MACHINE", "line_number": 24, "usage_type": "name"}, {"api_name": "storescraper.categories.STOVE", "line_number": 25, "usage_type": "name"}, {"api_name": "storescraper.categories.MONITOR", "line_number": 26, "usage_type": "name"}, {"api_name": "storescraper.categories.TELEVISION", "line_number": 32, "usage_type": "name"}, {"api_name": "storescraper.categories.STEREO_SYSTEM", "line_number": 33, "usage_type": "name"}, {"api_name": "storescraper.categories.STEREO_SYSTEM", "line_number": 34, "usage_type": "name"}, {"api_name": "storescraper.categories.STEREO_SYSTEM", "line_number": 35, "usage_type": "name"}, {"api_name": "storescraper.categories.CELL", "line_number": 36, "usage_type": "name"}, {"api_name": "storescraper.categories.REFRIGERATOR", "line_number": 37, "usage_type": "name"}, {"api_name": "storescraper.categories.REFRIGERATOR", "line_number": 38, "usage_type": "name"}, {"api_name": "storescraper.categories.REFRIGERATOR", "line_number": 39, "usage_type": "name"}, {"api_name": "storescraper.categories.REFRIGERATOR", "line_number": 40, "usage_type": "name"}, {"api_name": "storescraper.categories.OVEN", "line_number": 41, "usage_type": "name"}, {"api_name": "storescraper.categories.OVEN", "line_number": 42, "usage_type": "name"}, {"api_name": "storescraper.categories.AIR_CONDITIONER", "line_number": 43, "usage_type": "name"}, {"api_name": "storescraper.categories.WASHING_MACHINE", "line_number": 44, "usage_type": "name"}, {"api_name": "storescraper.categories.WASHING_MACHINE", "line_number": 45, "usage_type": "name"}, {"api_name": "storescraper.categories.WASHING_MACHINE", "line_number": 46, "usage_type": "name"}, {"api_name": "storescraper.categories.WASHING_MACHINE", "line_number": 47, "usage_type": "name"}, {"api_name": "storescraper.categories.STOVE", "line_number": 48, "usage_type": "name"}, {"api_name": "storescraper.categories.STOVE", "line_number": 49, "usage_type": "name"}, {"api_name": "storescraper.categories.MONITOR", "line_number": 50, "usage_type": "name"}, {"api_name": 
"storescraper.utils.session_with_proxy", "line_number": 53, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 61, "usage_type": "call"}, {"api_name": "storescraper.utils.session_with_proxy", "line_number": 81, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 87, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 102, "usage_type": "call"}, {"api_name": "storescraper.utils.html_to_markdown", "line_number": 114, "usage_type": "call"}, {"api_name": "storescraper.product.Product", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "71553666", "text": "from django.db import models\n\nclass Customer(models.Model):\n\tcustomer_name = models.CharField(\n\t\tmax_length=128,\n\t\tblank=True,\n\t\tnull=True,\n\t\tverbose_name='Nombre',\n\t\thelp_text='Ingrese el Nombre Completo'\n\t\t)\n\tcustomer_address = models.CharField(\n\t\tmax_length=200,\n\t\tblank=True,\n\t\tnull=True,\n\t\tverbose_name='Direccion',\n\t\thelp_text='Ingrese la direccion del cliente')\n\tcustomer_phone = models.CharField(\n\t\tmax_length=24,\n\t\tblank=True,\n\t\tnull=True,\n\t\tverbose_name='Telefono',\n\t\thelp_text='Ingrese el telefono del cliente')\n\n\tdef __str__(self):\n\t\treturn self.customer_name\n\nclass Product(models.Model):\n\tProduct_name = models.CharField(\n\t\tmax_length=128,\n\t\tblank=True,\n\t\tnull=True,\n\t\tverbose_name='Product',\n\t\thelp_text='Ingrese el Nombre del Producto'\n\t\t)\n\tProduct_price = models.DecimalField(\n\t\tmax_digits=64,\n\t\tdecimal_places = 2,\n\t\tverbose_name='Precio',\n\t\thelp_text='Precio del producto')\n\tProduct_type = models.CharField(\n\t\tmax_length=128,\n\t\tblank=True,\n\t\tnull=True,\n\t\tverbose_name='Tipo de Producto',\n\t\thelp_text='Ingrese el tipo del producto al que pertenece')\n\tProduct_description = models.TextField(\n\t\tmax_length = 400,\n\t\tverbose_name = 'Descripcion del texto',\n\t\thelp_text = 'Ingrese la descripcion del producto'\n\t\t)\n\n\tdef __str__(self):\n\t\treturn self.Product_name\n\nclass Stock(models.Model):\n\tstock_prodcut_id = models.ForeignKey('Product')\n\tstock_quantity = models.IntegerField(\n\t\tmax_length = 24,\n\t\tverbose_name = 'Cantidad del producto',\n\t\thelp_text = 'Ingese la cantidad'\n\t\t)\n\nclass Order(models.Model):\n\torder_customer_id = models.ForeignKey('Customer')\n\torder_product_id = models.ForeignKey('Product')\n\torder_amount = models.IntegerField(\n\t\tmax_length = 24,\n\t\tverbose_name = 'Cantidad',\n\t\thelp_text = 'Cantidad del producto a incluir en la orden.')\n\toder_date = models.DateField(auto_now=True)", "sub_path": "Lab4/order/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1810, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.models.Model", "line_number": 3, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 3, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 4, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 4, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 27, "usage_type": "attribute"}, 
{"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "607687358", "text": "\"\"\" NatModelAtProv.py\n\nUsing the national level model to do province level\nextrapolation.\"\"\"\nimport sys\nsys.path.insert(0,\"..\\\\..\\\\\")\n\n## Standard imports \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport shapefile\n\n## Risk map tools\nfrom riskmap3.tsir import TSIR\nfrom riskmap3.map_maker import *\nfrom riskmap3.data_process.vis_tools import *\n\n## Overwrite some risk map defaults\nplt.rcParams[\"font.size\"] = 22.\n\n## Get the data\npickle_jar = \"..\\\\pickle_jar\\\\\"\nri = pd.read_pickle(pickle_jar+\"extrapolated_ri.pkl\").rename(\"mcv1\")\npop = pd.read_pickle(pickle_jar+\"extrapolated_population.pkl\").rename(\"population\")\ncases = pd.read_pickle(pickle_jar+\"smoothed_cases.pkl\").rename(\"cases\")\nrejected = pd.read_pickle(pickle_jar+\"rejected.pkl\").rename(\"rejected\")\nbr_wp = pd.read_pickle(pickle_jar+\"extrapolated_birth_rate.pkl\").rename(\"world_pop\")\nbr_dhs = pd.read_pickle(pickle_jar+\"extrapolated_dhs_birth_rate.pkl\").rename(\"dhs\")\nsia = pd.read_pickle(pickle_jar+\"extrapolated_sia_nofuture.pkl\").rename(\"sia\")\nmcv2 = pd.read_pickle(pickle_jar+\"extrapolated_mcv2.pkl\").rename(\"mcv2\")\n\n## Save the computed traces?\nprovince = \"asia:pakistan:islamabad\"\n_npz_name = None #\"islamabad_12_19\"\n\n## For later comparison\nupdated_2017 = pd.read_pickle(pickle_jar+\"updated_smoothed_cases.pkl\")\nupdated_cases = updated_2017.loc[province]\n\n## Update the cases we use for fitting?\ncases = 
updated_2017.reindex(cases.index).fillna(cases)\n\n## Combine birth rate estimates to take the world pop\n## where it's good and dhs otherwise.\nbr_wp.loc[\"asia:pakistan:gilgit baltistan\"] = np.nan\nbr = br_wp.fillna(br_dhs).rename(\"birth_rate\")\n\n## Create full DF (where we drop NA because we don't have DHS clusters in\n## all states. Could instead use interpolation here).\n## Now we also drop NaNs because this is gonna be used for model calibration,\n## and we want to pass the TSIR class only data with corresponding cases.\ncombined = pd.concat([ri,mcv2,pop,cases,br,sia],axis=1).dropna()\nextrapolation = pd.concat([ri,mcv2,pop,br,sia],axis=1).dropna()\n\n## Choose relevant times (cutting off the early stuff due to time dependent\n## changes in reporting).\ncombined = combined.loc(axis=0)[:,pd.to_datetime(\"01-01-2012\"):]#pd.to_datetime(\"09-24-2017\")]\nextrapolation = extrapolation.loc(axis=0)[:,pd.to_datetime(\"01-01-2012\"):]\n\n## Adjusted_births\ndef adj_births(df):\n\tbirths = df.birth_rate*df.population/1000.\n\ttry:\n\t\tmcv2 = df.mcv2\n\texcept AttributeError:\n\t\tmcv2 = 0.\n\treturn births*(1.-0.9*df.mcv1*(1.-mcv2)-0.99*df.mcv1*mcv2)\ncombined[\"adj_births\"] = adj_births(combined)\n\n## Aggregate up\n## National level model\ndef up_sample(x):\n\ttotal_pop = x.population.sum()\n\tmcv1 = np.sum(x.population*x.mcv1)/total_pop\n\tmcv2 = np.sum(x.population*x.mcv2)/total_pop\n\tbr = np.sum(x.population*x.birth_rate)/total_pop\n\tsia = np.sum(x.population*x.sia)/total_pop\n\tseries = pd.Series([mcv1,mcv2,total_pop,x.cases.sum(),br,sia],\n\t\t\t\t\t   index=[\"mcv1\",\"mcv2\",\"population\",\"cases\",\"birth_rate\",\"sia\"])\n\treturn series\n\n## Create grouped and subset data frames\nprov_name = province[province.rfind(\":\")+1:]\nnational = combined.groupby(level=1).apply(up_sample)\nextrap = extrapolation.loc[province]\nextrap[\"adj_births\"] = adj_births(extrap)\n\n## Create national model\nnat_model = TSIR(national)\nnat_model.mle(detrended=True,weighted=True,verbose=False)\nnat_model.transmission_regression(periodicity=24)\n\n## Get province level reporting rates, etc.\n## Reconstruct susceptibles, etc.\np_model = TSIR(combined.loc[province])\np_model.mle(detrended=True,weighted=True,verbose=False)\n\n########################## Full Forward/One step projection\nstd_logE = nat_model.std_logE\nnum_samples = 10000\nfull_samples = np.zeros((num_samples,len(extrap)))\nfull_samples_S = np.zeros((num_samples,len(extrap)))\none_step_samples = np.zeros((num_samples,len(extrap)))\none_step_S = np.zeros((num_samples,len(extrap)))\npop_fraction = p_model.df.population.mean()/nat_model.df.population.mean()\n\n## Set up ICs\nI_inferred = p_model.beta[0]*(p_model.cases+1.)-1.\nupdated_I_inferred = p_model.beta[0]*(updated_cases + 1.) - 1.
\nfull_samples_S[:,0] = pop_fraction*nat_model.S_bar + p_model.Z[0]\nfull_samples[:,0] = I_inferred[0]\none_step_samples[:,0] = I_inferred[0]\none_step_S[:,0] = pop_fraction*nat_model.S_bar + p_model.Z[0]\n\n## Loop through time\nfor i in range(1,len(extrap)):\n\t\n\t## Time of year for seasonality\n\ttime_in_period = i % nat_model.periodicity\n\n\t## Update one step ahead and full projection lambdas\n\tlam = (nat_model.t_beta[time_in_period]/pop_fraction)*(full_samples_S[:,i-1])*(full_samples[:,i-1]**nat_model.alpha)\n\n\t## If we have data, we compute the one_step ahead projection\n\tif i <= p_model.n_steps:\n\t\tlam_one_step = (nat_model.t_beta[time_in_period]/pop_fraction)*(one_step_S[:,i-1])*(I_inferred[i-1]**nat_model.alpha)\n\telse:\n\t\tlam_one_step = (nat_model.t_beta[time_in_period]/pop_fraction)*(one_step_S[:,i-1])*(one_step_samples[:,i-1]**nat_model.alpha)\n\n\t## Sample for new infecteds in both cases\n\tI_ts = lam*np.exp(std_logE*np.random.normal(size=(num_samples,)))\n\tS_ts = (full_samples_S[:,i-1]+extrap.adj_births[i-1]-I_ts)*(1. - extrap.sia[i-1])\n\n\t## Update predictions and residuals\n\tfull_samples_S[:,i] = S_ts\n\tfull_samples[:,i] = I_ts\n\tone_step_samples[:,i] = lam_one_step*np.exp(std_logE*np.random.normal(size=(num_samples,)))\n\tone_step_S[:,i] = (one_step_S[:,i-1]+extrap.adj_births[i-1]-one_step_samples[:,i])*(1. - extrap.sia[i-1])\n\t\n\n##### plot the results\ndef low_mid_high(samples):\n\tlow = np.percentile(samples,2.5,axis=0)\n\tmid = np.mean(samples,axis=0)#np.percentile(samples,50.,axis=0)\n\thigh = np.percentile(samples,97.5,axis=0)\n\treturn low,mid,high\n\nfig, axes = plt.subplots(2,1,sharex=True,figsize=(14,10))\n## Add SIAs to plots\ntoday = pd.to_datetime(\"13-11-2017\",format=\"%d-%m-%Y\")\nfor x in p_model.df[p_model.df[\"sia\"] != 0.].index:\n\taxes[0].axvline(x,c=\"k\",alpha=0.4,ls=\"dashed\")\n\nfull_low, full_mid, full_high = low_mid_high(full_samples)\nfull_low_S, full_mid_S, full_high_S = low_mid_high(full_samples_S)\none_step_low, one_step_mid, one_step_high = low_mid_high(one_step_samples)\nos_S_low, os_S_mid, os_S_high = low_mid_high(one_step_S)\n\n## 2012 - 2018 extrapolation\nI = len(p_model.df.index)\naxes[0].fill_between(p_model.df.index,full_low_S[:I],full_high_S[:I],color=\"C1\",alpha=0.2)\naxes[0].plot(p_model.df.index,full_mid_S[:I],color=\"C1\",label=r\"S$_t$ | C$_0$\")\naxes[1].fill_between(p_model.df.index,full_low[:I],full_high[:I],color=\"C3\",alpha=0.2)\naxes[1].plot(p_model.df.index,full_mid[:I],color=\"C3\",label=r\"I$_t$ | C$_0$\")\n\n## 2018 and beyond projection (one_step from 2012 to 2018)\naxes[0].fill_between(extrap.index,os_S_low,os_S_high,color=\"C0\",alpha=0.2)\naxes[0].plot(extrap.index,os_S_mid,color=\"C0\",label=r\"S$_t$ | C$_{t-1}$\")\naxes[1].fill_between(extrap.index,one_step_low,one_step_high,color=\"C4\",alpha=0.2)\naxes[1].plot(extrap.index,one_step_mid,color=\"C4\",label=r\"I$_t$ | C$_{t-1}$\")\n\n## data\naxes[1].plot(p_model.df.index,I_inferred,label=r\"Scaled C$_t$\",color=\"k\",marker=\".\",ls=\"None\")\n#axes[1].plot(updated_I_inferred,label=\"Updated data\",color=\"C3\",marker=\"x\",ls=\"None\")
\n\n## Legend\naxes[0].plot([],c=\"k\",alpha=0.5,ls=\"dashed\",label=\"SIA\")\naxes[0].legend(loc=1)\naxes[1].legend(loc=1)\naxes[1].set(ylabel=\"Infecteds\")\naxes[0].set(ylabel=\"Susceptibles\")\naxes[0].ticklabel_format(axis=\"y\",style=\"sci\",scilimits=(0,1))\naxes[1].ticklabel_format(axis=\"y\",style=\"sci\",scilimits=(0,1))\nplt.tight_layout()\nplt.savefig(\"..\\\\_plots\\\\\"+prov_name.replace(\" \",\"_\")+\"_nosia.pdf\")\n\n## Save samples\nif _npz_name is not None:\n\tnp.savez(\"..\\\\npz_jar\\\\\"+_npz_name+\"_sia.npz\",I_samples=one_step_samples,S_samples=one_step_S)\n\n\n## Sum the cases after a given time\ncut_off_time = pd.to_datetime(\"31-12-2017\",format=\"%d-%m-%Y\")\nI = np.argmin(np.abs(extrap.index - cut_off_time))\nfuture_cases = np.sum(one_step_mid[I:])\nprint(\"Infections from 2018 onward in this model = %.0f\" % future_cases)\nplt.show()", "sub_path": "Decreasing_measles_burden_by_optimizing_campaign_timing_Pythoncode/Pakistan/tsir_model/NatModelAtProv.py", "file_name": "NatModelAtProv.py", "file_ext": "py", "file_size_in_byte": 7868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.path.insert", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 79, "usage_type": "call"}, {"api_name": "riskmap3.tsir.TSIR", "line_number": 90, "usage_type": "call"}, {"api_name": "riskmap3.tsir.TSIR", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 132, "usage_type": "attribute"}, 
{"api_name": "numpy.exp", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.percentile", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 190, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}]} +{"seq_id": "327252934", "text": "from DroneDenoise.DataHandler.DataHandler import SignalsHandler\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\n\nif __name__ == '__main__':\n signal_size = 1\n window_size = 960\n\n sh = SignalsHandler('D:\\\\private_git\\\\DroneDenoise\\\\Data\\\\Extracted_Raw_Drone\\\\sides')\n s = sh.get_signal()\n s.add_noise()\n s_noise = s.X\n s_clean = s.Y\n\n _, _, noise_stft = signal.stft(s_noise, fs=48000, nperseg=window_size, nfft=window_size)\n _, _, clean_stft = signal.stft(s_clean, fs=48000, nperseg=window_size, nfft=window_size)\n\n _, ax = plt.subplots()\n ax.plot(np.abs(noise_stft[:, 3000]), label='noise')\n ax.plot(np.abs(clean_stft[:, 3000]), label='clean')\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()", "sub_path": "DataHandler/print_stft.py", "file_name": "print_stft.py", "file_ext": "py", "file_size_in_byte": 767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "DroneDenoise.DataHandler.DataHandler.SignalsHandler", "line_number": 10, "usage_type": "call"}, {"api_name": "scipy.signal.stft", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 16, "usage_type": "name"}, {"api_name": "scipy.signal.stft", "line_number": 17, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "131030858", "text": "from pymorphy2 import MorphAnalyzer\nfrom string import punctuation\nimport json\n\n\ndef parse_text(data):\n text = ' '.join(data)\n text = [word.lower().strip(punctuation) for word in text.split()]\n text = [word for word in text if word != '']\n result = []\n morph = MorphAnalyzer()\n for word in text:\n parser = morph.parse(word)[0]\n dct = {'word': str(parser.word), 'lemma': str(parser.normal_form), 'tag': str(parser.tag)}\n result.append(dct)\n return result\n\n\nif __name__ == \"__main__\":\n with open(input('Путь к файлу с текстом: '), 'r', encoding='utf-8') as file:\n messages = json.load(file)\n with open(input('Назвать новый файл: '), 'w', encoding='utf-8') as new_file:\n json.dump(parse_text(messages), new_file, ensure_ascii=False, indent=2)\n", "sub_path": "code/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "string.punctuation", "line_number": 8, "usage_type": "argument"}, {"api_name": "pymorphy2.MorphAnalyzer", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "528079277", "text": "import cartopy.crs as ccrs\nimport xarray as xr\nimport matplotlib.pyplot as plt\n\nimport cartopy\n\ndef plotaMERGE(ano):\n\n #path = \"/dados/dmdpesq/Experimento_umidade_do_solo/MERGE/\"\n path = \"/dados/dmdpesq/MERGE/\"\n path_out =\"/dados/dmdpesq/out_wrf/\"\n\n \n name_file_1 = 'prec_concate_'+ano+'08.nc'\n name_file_2 = 'prec_concate_'+ano+'09.nc'\n print(name_file_1)\n print(name_file_2)\n\n \n\n umidade = 'Mean_Agosto_Setembro_MERGE'\n text = 'Condição inicial:' + umidade\n MERGE_1 = xr.open_dataset(path + name_file_1)\n MERGE_2 = xr.open_dataset(path + name_file_2)\n da_1 = MERGE_1.prec.mean('time')\n da_2 = MERGE_2.prec.mean('time')\n\n da = (da_1 + da_2)/2\n\n\n lons = MERGE_1.variables['lon'][:]\n lats = MERGE_1.variables['lat'][:]\n fig, ax = plt.subplots(111,figsize=(15,15), dpi=200)\n ax = plt.axes(projection=ccrs.PlateCarree())\n clevs=[-70,2,4,6,8,10,12,14,16,18,70]\n color=['white','dodgerblue','darkturquoise','mediumspringgreen','lime','yellow',\n 'orange','goldenrod','red','firebrick']\n cp = plt.contourf(lons,lats,da, clevs, colors=color,zorder=1)\n ax.coastlines(resolution='110m') #“110m”, “50m”, and “10m”.\n ax.add_feature(cartopy.feature.BORDERS, linestyle=':')\n #for BR\n ax.set_extent([-83, -34, -47.5, 10])\n ax.stock_img()\n ax.set_title(\n 'MERGE ' \n + ano \n + '\\n'\n +'Mean Agosto + Setembro 12Z'\n + '\\n'\n + ' ',\n fontsize=18\n )\n fig.colorbar(cp, orientation='horizontal',pad=0.05)\n fig.set_label('mm')\n title = umidade + '_'+ ano + '_12Z_.png'\n plt.savefig(path_out + title, bbox_inches='tight', pad_inches=.2, dpi=300)\n print('Saved: {}'.format(title))\n plt.close()\n return\n\nano = '2018'\nplotaMERGE(ano)", "sub_path": "wrf_CDO_PY/mergePlot2meses.py", "file_name": "mergePlot2meses.py", "file_ext": "py", "file_size_in_byte": 1823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "xarray.open_dataset", "line_number": 23, "usage_type": "call"}, {"api_name": "xarray.open_dataset", 
"line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 34, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "cartopy.feature", "line_number": 40, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "536656080", "text": "from unittest import TestCase\nfrom django.core.exceptions import ImproperlyConfigured\nfrom rest_framework.test import APIRequestFactory\nfrom tests.models import TestObject\nfrom tests.serializers import TestObjectSerializer\nfrom tests.views import TestObjectViewSet\n\n\nclass DynamicFieldsMixinTests(TestCase):\n\n def get_request(self, address=\"\"):\n \"\"\"\n Get a Django Rest Framework request from an address using a Request Factory.\n :param address:\n :return:\n \"\"\"\n return TestObjectViewSet(action_map={'get': 'list'}).initialize_request(self.factory.get(address))\n\n def setUp(self):\n self.factory = APIRequestFactory()\n self.test_object = TestObject(field_one=\"1\", field_two=\"2\", field_three=\"3\", field_four=\"4\")\n\n def test_exception_raised_without_request_in_context(self):\n with self.assertRaises(ImproperlyConfigured):\n TestObjectSerializer(self.test_object, context={}).data\n\n def test_exception_raised_without_context(self):\n with self.assertRaises(ImproperlyConfigured):\n TestObjectSerializer(self.test_object).data\n\n def test_serializer_get_with_no_fields_specified_returns_default(self):\n request = self.get_request(\"test\")\n data1 = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n request = self.get_request(\"test/?field_set=default\")\n data2 = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n self.assertEqual(data1, data2)\n\n def test_serializer_exclude_returns_everything_in_default_except_excluded_object(self):\n request = self.get_request(\"test/?exclude_fields=field_one\")\n data = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n self.assertNotIn(\"field_one\", data.keys())\n self.assertIn(\"field_two\", data.keys())\n self.assertIn(\"field_three\", data.keys())\n # Four not in the default set\n self.assertNotIn(\"field_four\", data.keys())\n\n def test_serializer_include_returns_nothing_except_field_name(self):\n request = self.get_request(\"test/?include_fields=field_two\")\n data = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n self.assertNotIn(\"field_one\", data.keys())\n self.assertIn(\"field_two\", data.keys())\n self.assertNotIn(\"field_three\", data.keys())\n self.assertNotIn(\"field_four\", data.keys())\n\n def test_serializer_field_sets_returns_only_fields_in_list(self):\n request = self.get_request(\"test/?field_set=one_and_three\")\n 
\n        data = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n        self.assertIn(\"field_one\", data.keys())\n        self.assertNotIn(\"field_two\", data.keys())\n        self.assertIn(\"field_three\", data.keys())\n        self.assertNotIn(\"field_four\", data.keys())\n\n    def test_no_default_specified_returns_the_full_resource(self):\n        default = TestObjectSerializer.Meta.field_sets.pop(\"default\")\n        request = self.get_request(\"test\")\n        serializer = TestObjectSerializer(self.test_object, context={\"request\": request})\n        data = serializer.data\n        self.assertIn(\"field_one\", data.keys())\n        self.assertIn(\"field_two\", data.keys())\n        self.assertIn(\"field_three\", data.keys())\n        self.assertIn(\"field_four\", data.keys())\n        TestObjectSerializer.Meta.field_sets['default'] = default\n\n    def test_include_field_that_does_not_exist_returns_none(self):\n        request = self.get_request(\"test/?include_fields=field_five\")\n        serializer = TestObjectSerializer(self.test_object, context={\"request\": request})\n        data = serializer.data\n        self.assertEqual(len(data.keys()), 0)\n\n    def test_exclude_field_that_does_not_exist_uses_default_serializer(self):\n        request = self.get_request(\"test/?exclude_fields=field_five\")\n        serializer = TestObjectSerializer(self.test_object, context={\"request\": request})\n        data = serializer.data\n        self.assertIn(\"field_one\", data.keys())\n        self.assertIn(\"field_two\", data.keys())\n        self.assertIn(\"field_three\", data.keys())\n        self.assertNotIn(\"field_four\", data.keys())\n\n    def test_field_set_specified_that_does_not_exist_uses_default_field_set(self):\n        request = self.get_request(\"test/?field_set=doesNotExist\")\n        data1 = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n        request = self.get_request(\"test/?field_set=default\")\n        data2 = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n        self.assertEqual(data1, data2)\n\n    def test_no_lingering_effects_integration(self):\n        request = self.get_request(\"test/?field_set=one_and_three\")\n        data = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n        self.assertIn(\"field_one\", data.keys())\n        self.assertNotIn(\"field_two\", data.keys())\n        self.assertIn(\"field_three\", data.keys())\n        self.assertNotIn(\"field_four\", data.keys())\n        request = self.get_request(\"test/?field_set=all\")\n        data = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n        self.assertIn(\"field_one\", data.keys())\n        self.assertIn(\"field_two\", data.keys())\n        self.assertIn(\"field_three\", data.keys())\n        self.assertIn(\"field_four\", data.keys())\n        request = self.get_request(\"test/?include_fields=field_one\")\n        data = TestObjectSerializer(self.test_object, context={\"request\": request}).data\n        self.assertIn(\"field_one\", data.keys())\n        self.assertNotIn(\"field_two\", data.keys())\n        self.assertNotIn(\"field_three\", data.keys())\n        self.assertNotIn(\"field_four\", data.keys())\n", "sub_path": "tests/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 5804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "tests.views.TestObjectViewSet", "line_number": 17, "usage_type": "call"}, {"api_name": "rest_framework.test.APIRequestFactory", "line_number": 20, "usage_type": "call"}, {"api_name": "tests.models.TestObject", "line_number": 21, "usage_type": "call"}, 
{"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 24, "usage_type": "argument"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 25, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 28, "usage_type": "argument"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 29, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 33, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 35, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 40, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 49, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 57, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer.Meta.field_sets.pop", "line_number": 64, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer.Meta", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 64, "usage_type": "name"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 66, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer.Meta", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 72, "usage_type": "name"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 76, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 82, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 91, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 94, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 100, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 106, "usage_type": "call"}, {"api_name": "tests.serializers.TestObjectSerializer", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "319912694", "text": "###\n##### Copyright 2021 Hewlett Packard Enterprise Development LP\n#####\n##### Licensed under the Apache License, Version 2.0 (the \"License\");\n##### You may not use this file except in compliance with the License.\n##### You may obtain a copy of the License at\n#####\n##### http://www.apache.org/licenses/LICENSE-2.0\n#####\n##### Unless required by applicable law or agreed to in writing, software\n##### distributed under the License is distributed on an \"AS IS\" BASIS,\n##### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n##### See the License for the specific language governing permissions and\n##### limitations under the License.\n#######\n##\n\n\n#!/usr/bin/env python\n\"\"\"\nThis script expands or shrinks an existing HPECP cluster as per the user requirements.\n\"\"\"\nimport json\nimport requests\nimport config\nimport sys\nimport time\n\ndef get_details(uri, header):\n \"\"\"\n Function to make GET API calls\n Returns:\n GET call body is returned if call is successfull\n Else returns the status code of the error\n \"\"\"\n try:\n get_response = requests.get(uri, verify=False, headers=header)\n if get_response.status_code == 200:\n get_response = json.loads(get_response.text)\n return get_response\n\n elif get_response.status_code != 200:\n print(\"The status code 
is: \" + str(get_response.status_code))\n print(\"Could Not Make the Get Call, Please Check Details, Exiting Program\")\n sys.exit(1)\n except requests.ConnectionError as req_err:\n print(\"get_details: The exception '{}' occured during connection\".format(req_err))\n sys.exit(1)\n except Exception as run_err:\n print(\"get_details: The exception '{}' has occured\".format(run_err))\n sys.exit(1)\n\ndef get_cluster_id(session_ID, var_config):\n try:\n header = {\n \"X-BDS-SESSION\" : session_ID\n } \n uri = \"http://\" + var_config.controller_IP + \":8080/api/v2/k8scluster\"\n cluster_details = get_details(uri, header)\n flag = 1\n for cluster in cluster_details['_embedded']['k8sclusters']:\n if cluster['label']['name'] == var_config.cluster_name:\n flag = 0\n cluster_id = cluster['_links']['self']['href']\n if flag != 0:\n print(\"Could not find the required cluster, please provide te right cluster name in the input file.\")\n sys.exit(1)\n #print(cluster_id)\n return(cluster_id)\n except Exception as run_err:\n print(\"get_cluster_id(): The exception '{}' has occured while getting cluster ID\".format(run_err)) \n sys.exit(1) \n\n\ndef get_host_uri(session_ID, var_config):\n try:\n header = {\n \"X-BDS-SESSION\" : session_ID\n } \n uri = \"http://\" + var_config.controller_IP + \":8080/api/v2/worker/k8shost\"\n host_details = get_details(uri, header)\n host_uri=[]\n status=[]\n for ip in var_config.host_ips:\n for host in host_details['_embedded']['k8shosts']:\n if host['ipaddr'] == ip:\n host_uri.append(host['_links']['self']['href'])\n status.append(host['status'])\n if len(host_uri) == 0:\n print(\"Could not Retrieve required host information: Check input host IP address.\")\n sys.exit(1)\n flag =0\n for id,item in enumerate(status):\n if item != 'ready' and var_config.expand_shrink ==\"expand\":\n flag=1\n print(\" The host \" +str(id+1) + \" is not in the required 'ready' state to be added, please wait for host to be ready or change the host\")\n if item != 'configured' and var_config.expand_shrink ==\"shrink\":\n flag=1\n print(\" The host \" +str(id+1) + \" is not in the required 'confifured' state to be removed, please ensure host is a part of the cluster or change the host\")\n if flag == 1:\n sys.exit(1)\n #print(host_uri)\n return(host_uri)\n except Exception as run_err:\n print(\"get_host_uri(): The exception '{}' has occured while getting host URI\".format(run_err)) \n sys.exit(1) \n\n\ndef get_session(var_config):\n \"\"\"\n Get session ID for use in subsequent API calls\n\n Returns:\n Session-ID\n \"\"\"\n try:\n header = {\n \"Content-Type\" : \"application/json\"\n } \n uri = \"http://\" + var_config.controller_IP + \":8080/api/v1/login\"\n payload = {\n \"name\" : var_config.username,\n \"password\": var_config.password\n }\n\n response = requests.post(uri, json=payload, verify=False, headers=header)\n if response.status_code == 201:\n print(\"Successfully Logged In\")\n return(response.headers['Location'])\n elif response.status_code != 201:\n print(\"Could Not Login, Please Check Credentials, Exiting Program\")\n sys.exit(1)\n except Exception as run_err:\n print(\"get_session(): The exception '{}' has occured while getting session ID\".format(run_err)) \n print(\"Could Not Login, Please Check Controller IP address, Exiting Program\")\n sys.exit(1) \n\ndef expand_shrink(session_ID, var_config, host_uri, cluster_id):\n try:\n header = {\n \"X-BDS-SESSION\" : session_ID\n } \n uri = \"http://\" + var_config.controller_IP + \":8080\" + cluster_id + \"/change_task\"\n 
\n        config = []\n        host = {}\n        if var_config.expand_shrink == 'expand':\n            k8shosts = \"add_k8shosts_config\"\n            for node, role in zip(host_uri, var_config.host_role):\n                host[\"node\"] = node\n                if role in (\"master\", \"worker\"):\n                    host[\"role\"] = role\n                else:\n                    print(\"Enter the right roles for the hosts\")\n                    sys.exit(1)\n                config.append(host)\n                host = {}\n        elif var_config.expand_shrink == 'shrink':\n            k8shosts = \"remove_k8shosts\"\n            for node in host_uri:\n                config.append(node)\n        else:\n            print(\"Enter the right cluster operation in the input file\")\n            sys.exit(1)\n        #print(config)\n        #print(k8shosts)\n        payload = {\n            \"change_spec\": {\n                k8shosts: config\n            },\n            \"operation\": \"reconfigure\",\n            \"reason\": \"\"\n        }\n        #print(payload)\n        response = requests.post(uri, json=payload, verify=False, headers=header)\n        #print(response.status_code)\n\n        if response.status_code == 204:\n            print(\"Started cluster operation\")\n        elif response.status_code != 204:\n            print(\"Could not complete cluster expand/shrink operation\")\n        uri = \"http://\" + var_config.controller_IP + \":8080\" + cluster_id \n        timeout = 600\n        timeout_start = time.time()\n        print(\"Waiting for cluster status to become Ready...\")\n        while time.time() < timeout_start + timeout:\n            cluster_details = get_details(uri, header)\n            if cluster_details[\"status\"] ==\"ready\":\n                break\n\n    except Exception as run_err:\n        print(\"expand_shrink(): The exception '{}' has occurred while expanding/shrinking cluster\".format(run_err)) \n        print(\"Could not complete cluster expand/shrink operation\")\n        sys.exit(1) \n\n#main function\ndef main():\n    \"\"\"\n    Main Function\n    \"\"\"\n    # Creating a generic object for Config class \n    print(\"Gathering Input Variables...\")\n    var_config = config.Config()\n    print(\"\\n\")\n\n    print(\"Getting Session ID...\")\n    session_ID = get_session(var_config)\n    print(\"\\n\")\n\n    print(\"Getting required Host URIs...\")\n    host_uri= get_host_uri(session_ID, var_config)\n    print(\"\\n\")\n\n    print(\"Acquiring cluster ID...\")\n    cluster_id = get_cluster_id(session_ID, var_config)\n    print(\"\\n\")\n\n    print(\"Performing cluster modification...\")\n    expand_shrink(session_ID, var_config, host_uri, cluster_id)\n    print(\"\\n\")\n\n    print(\"Cluster has been Modified\")\n\n\nmain()\n", "sub_path": "DL/scripts/expand_shrink_cluster/expand_shrink.py", "file_name": "expand_shrink.py", "file_ext": "py", "file_size_in_byte": 8159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 101, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 106, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 126, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 132, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 136, "usage_type": 
"call"}, {"api_name": "config.append", "line_number": 155, "usage_type": "call"}, {"api_name": "config.append", "line_number": 160, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 163, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 174, "usage_type": "call"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 185, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 193, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 202, "usage_type": "call"}]} +{"seq_id": "126450689", "text": "from django.db import models\nfrom user.models import User\n\n\nclass Activity(models.Model):\n owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='owner')\n picture = models.ImageField(upload_to='images')\n text = models.TextField(blank=True)\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n description = models.TextField()\n\n def __str__(self):\n return self.name\n\n\nclass GroupUser(models.Model):\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n group = models.ForeignKey(Group, on_delete=models.CASCADE)\n\n\nclass Event(models.Model):\n name = models.CharField(max_length=50)\n description = models.TextField()\n date = models.DateField()\n start_time = models.TimeField()\n end_time = models.TimeField()\n\n def __str__(self):\n return self.name\n\n\nclass EventUser(models.Model):\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n event = models.ForeignKey(Event, on_delete=models.CASCADE)\n", "sub_path": "Django/network/social/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 6, "usage_type": "call"}, {"api_name": "user.models.User", "line_number": 6, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.db.models.ImageField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "user.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 20, "usage_type": "call"}, {"api_name": "user.models.User", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.db.models", 
"line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.TimeField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.TimeField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "user.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 36, "usage_type": "call"}, {"api_name": "user.models.User", "line_number": 36, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 37, "usage_type": "attribute"}]} +{"seq_id": "169980580", "text": "#%%\n\n'''\nadd jupyter magic marker to show figures in visual code\n'''\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\nclass Descriptives:\n '''\n following https://towardsdatascience.com/data-exploration-and-analysis-using-python-e564473d7607\n '''\n def __init__(self, data):\n self.data = data\n self.dirname = os.getcwd()\n\n def assess_distribution(self, column):\n '''\n Univariate Analysis, differs between categorical and continous\n normal distribution: sepal_length, width \n Petal Length, width : two distributions \n equal distrubtion for each class. 
\n        '''\n        \n        if os.path.isfile(f\"{self.dirname}/data/hist/hist_{column}.png\"):\n            os.remove(f\"{self.dirname}/data/hist/hist_{column}.png\")\n\n        plt.rcParams[\"figure.figsize\"] = [7.00, 3.50]\n        plt.rcParams[\"figure.autolayout\"] = True\n        plt.title(label = column)\n        plt.hist(self.data[column])\n        plt.savefig(f\"{self.dirname}/data/hist/hist_{column}.png\")\n        plt.show()\n        plt.close()\n\n# %%\n\n\nif __name__ == '__main__':\n    from datetime import datetime\n\n    dirname = os.getcwd()\n    data = pd.read_csv(f\"{str(dirname)}/data/temps.csv\")\n\n    # used to develop, no outliers removed\n    Description = Descriptives(data)\n    \n    for i in data.columns:\n        Description.assess_distribution(i)\n\n", "sub_path": "src/description/descriptor.py", "file_name": "descriptor.py", "file_ext": "py", "file_size_in_byte": 1385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.getcwd", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "649463932", "text": "\"\"\"\nusage: sl summary [options]\n\nDisplay summary information about the account\n\"\"\"\n# :license: MIT, see LICENSE for more details.\n\nfrom SoftLayer import NetworkManager\nfrom SoftLayer.CLI import CLIRunnable, Table\n\n\nclass Summary(CLIRunnable):\n    \"\"\"\nusage: sl summary [options]\n\nDisplay summary information about the account\n\nOptions:\n  --sortby=ARG  Column to sort by. 
options: datacenter, vlans,\n subnets, IPs, networking, hardware, ccis, firewall\n\"\"\"\n action = None\n\n def execute(self, args):\n mgr = NetworkManager(self.client)\n datacenters = mgr.summary_by_datacenter()\n\n t = Table([\n 'datacenter', 'vlans', 'subnets', 'IPs', 'networking',\n 'hardware', 'ccis'\n ])\n t.sortby = args.get('--sortby') or 'datacenter'\n\n for name, dc in datacenters.items():\n t.add_row([\n name,\n dc['vlanCount'],\n dc['subnetCount'],\n dc['primaryIpCount'],\n dc['networkingCount'],\n dc['hardwareCount'],\n dc['virtualGuestCount'],\n ])\n\n return t\n", "sub_path": "SoftLayer/CLI/modules/summary.py", "file_name": "summary.py", "file_ext": "py", "file_size_in_byte": 1144, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "SoftLayer.CLI.CLIRunnable", "line_number": 12, "usage_type": "name"}, {"api_name": "SoftLayer.NetworkManager", "line_number": 25, "usage_type": "call"}, {"api_name": "SoftLayer.CLI.Table", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "289217154", "text": "\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\nimport serial\nfrom multiprocessing import Process, Queue\nimport time, threading\n\n\ndef display(q, windowWidth):\n\n win2 = pg.GraphicsWindow(title=\"brxs\")\n win2.setWindowTitle('brxs')\n p2 = win2.addPlot(title=\"brxs\")\n curve = p2.plot(pen='y')\n\n x_np = []\n y_np = []\n\n def updateInProc(curve, q, x, y, windowWidth):\n item = q.get()\n x.append(item[0])\n y.append(item[1])\n x = x[np.size(x)-windowWidth:]\n y = y[np.size(y)-windowWidth:]\n curve.setData(x,y)\n\n timer = QtCore.QTimer()\n timer.timeout.connect(lambda: updateInProc(curve, q, x_np, y_np, windowWidth))\n timer.start(0.00000000000001)\n\n QtGui.QApplication.instance().exec_()\n\ndef io(running,q):\n ser = serial.Serial(\"/dev/ttyUSB0\", 57600)\n dummy = ser.readline()\n t = 0\n while running.is_set():\n line = ser.readline()\n line = line.decode(\"utf-8\")\n line = line.split(\",\")[:19]\n try:\n y = int(float(line[analogPort]))\n except:\n y = 0\n t += 0.0165\n q.put([t,y])\n time.sleep(0.0000000000000000000001)\n print(\"Done\")\n\ndef qtPlotSerialData(windowWidth=1000):\n global analogPort\n analogPort = 0 # port data to be plotted\n q = Queue()\n run = threading.Event()\n run.set()\n\n t = threading.Thread(target=io, args=(run,q))\n t.start()\n\n p = Process(target=display, args=(q, windowWidth))\n p.start()\n while(1):\n port = input(\"port? 
\")\n try:\n port = int(port)\n except ValueError:\n port = 0\n if ((port >= 0) & (port < 19)):\n analogPort = port\n\n\nif __name__ == '__main__':\n qtPlotSerialData(1000)", "sub_path": "abraxasOne/qtPlotSerialData.py", "file_name": "qtPlotSerialData.py", "file_ext": "py", "file_size_in_byte": 1780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pyqtgraph.GraphicsWindow", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 25, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtCore.QTimer", "line_number": 28, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtCore", "line_number": 28, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtGui.QApplication.instance", "line_number": 32, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui.QApplication", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 32, "usage_type": "name"}, {"api_name": "serial.Serial", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 54, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 55, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 58, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "620634579", "text": "#!/usr/bin/python3\n\"\"\"\nStarts a Flask web application with the following conditions\n\n- listens on 0.0.0.0, port 5000\n- routes\n - /cities_by_states: lists state objects in dbstorage and associated cities\n\"\"\"\n\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\n\napp = Flask(__name__)\n\n\n@app.route('/cities_by_states', strict_slashes=False)\ndef cities_by_states():\n \"\"\" displays state objects in dbstorage and associated cities \"\"\"\n return render_template('8-cities_by_states.html',\n states=storage.all('State').values())\n\n\n@app.teardown_appcontext\ndef teardown(self):\n \"\"\"Remove the current SQLAlchemy session.\"\"\"\n storage.close()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n", "sub_path": "web_flask/8-cities_by_states.py", "file_name": "8-cities_by_states.py", "file_ext": "py", "file_size_in_byte": 758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "models.storage.all", "line_number": 21, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 21, "usage_type": "name"}, {"api_name": "models.storage.close", "line_number": 27, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "293683605", "text": "from ftw import bumblebee\nfrom ftw.bumblebee.interfaces import IBumblebeeDocument\nfrom opengever.api import _\nfrom opengever.api.actors import serialize_actor_id_to_json_summary\nfrom opengever.api.serializer import extend_with_backreferences\nfrom opengever.api.serializer import GeverSerializeToJson\nfrom opengever.base.helpers import display_name\nfrom opengever.base.interfaces import IOpengeverBaseLayer\nfrom opengever.base.interfaces import IReferenceNumber\nfrom opengever.document.approvals 
import IApprovalList\nfrom opengever.document.behaviors import IBaseDocument\nfrom opengever.document.interfaces import ICheckinCheckoutManager\nfrom opengever.document.versioner import Versioner\nfrom opengever.meeting import is_meeting_feature_enabled\nfrom opengever.meeting.model import SubmittedDocument\nfrom opengever.workspaceclient.interfaces import ILinkedDocuments\nfrom plone.restapi.deserializer import json_body\nfrom plone.restapi.interfaces import IExpandableElement\nfrom plone.restapi.interfaces import IJsonCompatible\nfrom plone.restapi.interfaces import ISerializeToJson\nfrom plone.restapi.services.content.update import ContentPatch\nfrom zExceptions import Forbidden\nfrom zope.component import adapter\nfrom zope.component import getMultiAdapter\nfrom zope.interface import implementer\nfrom zope.interface import Interface\nimport os.path\n\n\nMIME_DOCX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'\n\n\n@implementer(ISerializeToJson)\n@adapter(IBaseDocument, Interface)\nclass SerializeDocumentToJson(GeverSerializeToJson):\n\n def __call__(self, *args, **kwargs):\n result = super(SerializeDocumentToJson, self).__call__(*args, **kwargs)\n\n ref_num = IReferenceNumber(self.context)\n result[u'reference_number'] = ref_num.get_number()\n\n version = \"current\" if kwargs.get('version') is None else kwargs.get('version')\n obj = self.getVersion(version)\n bumblebee_service = bumblebee.get_service_v3()\n result['bumblebee_checksum'] = IBumblebeeDocument(obj).get_checksum()\n result[u'thumbnail_url'] = bumblebee_service.get_representation_url(\n obj, 'thumbnail')\n result[u'preview_url'] = bumblebee_service.get_representation_url(\n obj, 'preview')\n result[u'pdf_url'] = bumblebee_service.get_representation_url(\n obj, 'pdf')\n result[u'file_extension'] = obj.get_file_extension()\n\n extend_with_backreferences(\n result, self.context, self.request, 'relatedItems',\n documents_only=True)\n\n checked_out_by = obj.checked_out_by()\n checked_out_by_fullname = display_name(checked_out_by) if checked_out_by else None\n\n if is_meeting_feature_enabled():\n self.extend_with_meeting_metadata(result)\n\n additional_metadata = {\n 'checked_out': checked_out_by,\n 'checked_out_fullname': checked_out_by_fullname,\n 'checkout_collaborators': list(obj.get_collaborators()),\n 'file_mtime': obj.get_file_mtime(),\n 'is_collaborative_checkout': obj.is_collaborative_checkout(),\n 'is_locked': obj.is_locked(),\n 'containing_dossier': obj.containing_dossier_title(),\n 'containing_subdossier': obj.containing_subdossier_title(),\n 'containing_subdossier_url': obj.containing_subdossier_url(),\n 'trashed': obj.is_trashed,\n 'is_shadow_document': obj.is_shadow_document(),\n 'current_version_id': obj.get_current_version_id(\n missing_as_zero=True),\n 'teamraum_connect_links': ILinkedDocuments(obj).serialize(),\n 'workspace_document_urls': ILinkedDocuments(obj).get_workspace_document_urls(),\n 'creator': serialize_actor_id_to_json_summary(obj.Creator()),\n }\n\n result.update(additional_metadata)\n return result\n\n def getVersion(self, version):\n \"\"\"Return context when no lazy initial version exists.\"\"\"\n\n if not Versioner(self.context).has_initial_version():\n return self.context\n\n return super(SerializeDocumentToJson, self).getVersion(version)\n\n def extend_with_meeting_metadata(self, result):\n submitted_documents = SubmittedDocument.query.by_source(self.context).all()\n result['submitted_with'] = [{'title': doc.proposal.title,\n '@id': doc.proposal.get_url()} for doc 
in submitted_documents]\n\n proposal = self.context.get_proposal()\n if proposal:\n result['proposal'] = {'title': proposal.Title(), '@id': proposal.absolute_url()}\n else:\n result['proposal'] = None\n\n result['meeting'] = None\n submitted_proposal = self.context.get_submitted_proposal()\n if submitted_proposal:\n result['submitted_proposal'] = {\n 'title': submitted_proposal.Title(), '@id': submitted_proposal.absolute_url()}\n meeting = submitted_proposal.load_model().get_meeting()\n if meeting:\n result['meeting'] = {'title': meeting.title, '@id': meeting.get_url()}\n else:\n result['submitted_proposal'] = None\n\n\nclass DocumentPatch(ContentPatch):\n\n def reply(self):\n data = json_body(self.request)\n\n self._validate_checked_out(data)\n self._validate_proposal_document(data)\n\n return super(DocumentPatch, self).reply()\n\n def _validate_checked_out(self, data):\n \"\"\"Only allow updating a documents file if the document is checked-out\n by the current user.\n \"\"\"\n if 'file' not in data:\n return\n\n manager = getMultiAdapter((self.context, self.request),\n ICheckinCheckoutManager)\n if not manager.is_checked_out_by_current_user():\n raise Forbidden(\n _(u'msg_not_checked_out_by_current_user',\n default=u'Document not checked-out by current user.'))\n\n def _validate_proposal_document(self, data):\n \"\"\"Prevent a proposals document being replaced by non-docx file.\n \"\"\"\n if not self.context.is_inside_a_proposal():\n return\n\n if 'file' not in data:\n return\n\n value = data['file']\n if not value:\n raise Forbidden(\n _(u'msg_needs_file_in_proposal_document',\n default=u\"It's not possible to have no file in proposal documents.\"))\n\n content_type = value.get('content-type')\n filename = value.get('filename')\n\n if content_type and content_type != MIME_DOCX:\n raise Forbidden(\n _(u'msg_docx_mime_type_for_proposal',\n default=u'Mime type must be ${docx_mimetype} for proposal documents.',\n mapping={'docx_mimetype': MIME_DOCX}))\n\n if not os.path.splitext(filename)[1].lower() == '.docx':\n raise Forbidden(\n _(u'msg_docx_file_extension_for_proposal',\n default=u'File extension must be .docx for proposal documents.'))\n\n\n@implementer(IExpandableElement)\n@adapter(IBaseDocument, IOpengeverBaseLayer)\nclass Approvals(object):\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n def __call__(self, expand=True):\n approvals = IApprovalList(self.context)\n return {'approvals': IJsonCompatible(approvals.get())}\n", "sub_path": "opengever/api/document.py", "file_name": "document.py", "file_ext": "py", "file_size_in_byte": 7425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "opengever.api.serializer.GeverSerializeToJson", "line_number": 35, "usage_type": "name"}, {"api_name": "opengever.base.interfaces.IReferenceNumber", "line_number": 40, "usage_type": "call"}, {"api_name": "ftw.bumblebee.get_service_v3", "line_number": 45, "usage_type": "call"}, {"api_name": "ftw.bumblebee", "line_number": 45, "usage_type": "name"}, {"api_name": "ftw.bumblebee.interfaces.IBumblebeeDocument", "line_number": 46, "usage_type": "call"}, {"api_name": "opengever.api.serializer.extend_with_backreferences", "line_number": 55, "usage_type": "call"}, {"api_name": "opengever.base.helpers.display_name", "line_number": 60, "usage_type": "call"}, {"api_name": "opengever.meeting.is_meeting_feature_enabled", "line_number": 62, "usage_type": "call"}, {"api_name": 
"opengever.workspaceclient.interfaces.ILinkedDocuments", "line_number": 79, "usage_type": "call"}, {"api_name": "opengever.workspaceclient.interfaces.ILinkedDocuments", "line_number": 80, "usage_type": "call"}, {"api_name": "opengever.api.actors.serialize_actor_id_to_json_summary", "line_number": 81, "usage_type": "call"}, {"api_name": "opengever.document.versioner.Versioner", "line_number": 90, "usage_type": "call"}, {"api_name": "opengever.meeting.model.SubmittedDocument.query.by_source", "line_number": 96, "usage_type": "call"}, {"api_name": "opengever.meeting.model.SubmittedDocument.query", "line_number": 96, "usage_type": "attribute"}, {"api_name": "opengever.meeting.model.SubmittedDocument", "line_number": 96, "usage_type": "name"}, {"api_name": "zope.interface.implementer", "line_number": 33, "usage_type": "call"}, {"api_name": "plone.restapi.interfaces.ISerializeToJson", "line_number": 33, "usage_type": "argument"}, {"api_name": "zope.component.adapter", "line_number": 34, "usage_type": "call"}, {"api_name": "opengever.document.behaviors.IBaseDocument", "line_number": 34, "usage_type": "argument"}, {"api_name": "zope.interface.Interface", "line_number": 34, "usage_type": "argument"}, {"api_name": "plone.restapi.services.content.update.ContentPatch", "line_number": 118, "usage_type": "name"}, {"api_name": "plone.restapi.deserializer.json_body", "line_number": 121, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 135, "usage_type": "call"}, {"api_name": "opengever.document.interfaces.ICheckinCheckoutManager", "line_number": 136, "usage_type": "argument"}, {"api_name": "zExceptions.Forbidden", "line_number": 138, "usage_type": "call"}, {"api_name": "opengever.api._", "line_number": 139, "usage_type": "call"}, {"api_name": "zExceptions.Forbidden", "line_number": 153, "usage_type": "call"}, {"api_name": "opengever.api._", "line_number": 154, "usage_type": "call"}, {"api_name": "zExceptions.Forbidden", "line_number": 161, "usage_type": "call"}, {"api_name": "opengever.api._", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.path.splitext", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 166, "usage_type": "name"}, {"api_name": "zExceptions.Forbidden", "line_number": 167, "usage_type": "call"}, {"api_name": "opengever.api._", "line_number": 168, "usage_type": "call"}, {"api_name": "opengever.document.approvals.IApprovalList", "line_number": 181, "usage_type": "call"}, {"api_name": "plone.restapi.interfaces.IJsonCompatible", "line_number": 182, "usage_type": "call"}, {"api_name": "zope.interface.implementer", "line_number": 172, "usage_type": "call"}, {"api_name": "plone.restapi.interfaces.IExpandableElement", "line_number": 172, "usage_type": "argument"}, {"api_name": "zope.component.adapter", "line_number": 173, "usage_type": "call"}, {"api_name": "opengever.document.behaviors.IBaseDocument", "line_number": 173, "usage_type": "argument"}, {"api_name": "opengever.base.interfaces.IOpengeverBaseLayer", "line_number": 173, "usage_type": "argument"}]} +{"seq_id": "89956405", "text": "\nimport argparse\nfrom pprint import pprint\nimport os\nimport logging\nimport logging.handlers\nimport contextlib\n\nimport session\nimport configuration\n\nlogger = logging.getLogger()\n\ndef setup_logging(config):\n # Get the level\n level=getattr(logging, config.logging_level)\n \n # Get the logger\n logger.setLevel(level)\n \n # Create a handler\n 
log_file_path = os.path.join(config.logging_dir, 'sandiegoinfocus.rets.download-metadata.log')\n handler = logging.handlers.WatchedFileHandler(log_file_path)\n handler.setLevel(level)\n \n # Create formatter\n formatter = logging.Formatter('%(asctime)s: %(name)s: %(levelname)s: %(message)s')\n \n # add formatter to handler\n handler.setFormatter(formatter)\n \n # Add handler to logger\n logger.addHandler(handler)\n logger.info('Logging level: {}'.format(level))\n\ndef download_metadata(config):\n #with Session.create('1.7.2') as s:\n # logger.debug('Logging in to RETS provider')\n # if s.login(login_url, username, password):\n # logger.debug('Requesting metadata')\n # res = s.get_metadata()\n # with open(file_path, 'w') as fp:\n # logger.debug('Writing response to file: %s', file_path)\n # fp.write(res.read().decode('utf-8', 'strict'))\n #logging.debug('Logging into RETS service')\n \n output_file_path = os.path.join(config.download_dir, 'metadata.xml')\n logger.debug('Metadata output file path: %s', output_file_path)\n \n with contextlib.closing(session.Session()) as rets_session:\n rets_session.login(\n login_url=config.rets_login_url,\n username=config.rets_username,\n password=config.rets_password)\n \n metadata_response = rets_session.get_metadata_response()\n #print(metadata_response)\n \n with open(output_file_path, 'w') as output_fp:\n output_fp.write(metadata_response.read().decode('utf-8', 'strict'))\n \ndef main():\n arg_parser = argparse.ArgumentParser()\n \n arg_parser.add_argument(\n '-c',\n '--config',\n required=True,\n dest='config_path',\n help=\"Path to the configuration file\")\n \n cmd_args = arg_parser.parse_args()\n config = configuration.read_configuration(cmd_args.config_path)\n setup_logging(config)\n download_metadata(config)\n\n logging.info('Finished')\n\nif __name__ == '__main__':\n main()\n ", "sub_path": "rets/download_metadata.py", "file_name": "download_metadata.py", "file_ext": "py", "file_size_in_byte": 2451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.handlers.WatchedFileHandler", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "contextlib.closing", "line_number": 50, "usage_type": "call"}, {"api_name": "session.Session", "line_number": 50, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 63, "usage_type": "call"}, {"api_name": "configuration.read_configuration", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "258965708", "text": "#!/usr/bin/env python3.4\nimport os,sys\nimport cgi, cgitb\nimport pymysql\nimport form_action_functions_db\n\n\ncgitb.enable()\nsys.stderr = sys.stdout\nform = cgi.FieldStorage()\n\ndef form_action_db(form, db, cur):\n print (\"\\n
\") \n    form_action_functions_db.form_dictionary(form) #Анализируем строку запроса\n    form_action_functions_db.print_form_db(form) #Создаем форму и отправляем данные на сервер\n    form_action_functions_db.db_list(form, db, cur) #Записываем в базу и оцениваем содержание\n    print (\"\\n
\") \n\n\nif __name__=='__main__':\n print('''\\\nContent-type:text/html\\r\\n\n\n\\nОтладка, в файл, из файла, обработка\\n\n

Отладка, в файл, из файла, обработка

\n    ''')\n    #соединяемся с базой данных\n    db  =  pymysql.connect(host = \"127.0.0.1\", user = \"g06u32\", passwd = \"mysql16\", db = \"g06u32\", charset = \"utf8\",use_unicode = True) # Open database connection\n    cur  =  db.cursor() # prepare a cur object using cursor() method\n    cur.execute('SET NAMES utf8') # execute SQL query using execute() method\n    form_action_db(form, db, cur)", "sub_path": "cgi-bin/form_action_db.py", "file_name": "form_action_db.py", "file_ext": "py", "file_size_in_byte": 1281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cgitb.enable", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cgi.FieldStorage", "line_number": 10, "usage_type": "call"}, {"api_name": "form_action_functions_db.form_dictionary", "line_number": 14, "usage_type": "call"}, {"api_name": "form_action_functions_db.print_form_db", "line_number": 15, "usage_type": "call"}, {"api_name": "form_action_functions_db.db_list", "line_number": 16, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "240377568", "text": "#!/home/rjslater/anaconda3/bin/python\nfrom subprocess import Popen, PIPE, STDOUT\nfrom datetime import date, datetime\nimport astral\nimport time\n\ncmd = 'curl wttr.in?format=\"%C+%t\"'\nps = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT)\noutput = ps.communicate()[0].decode('utf-8').strip()\n\n# Getting condition\ncondition = output[:output.rfind(' ')].strip()\n\n# Getting temperature\nfarenheit = output[output.rfind(' '):].strip()\nif farenheit[0] == '+':\n    farenheit = farenheit[1:]\ncelsius = str(int((int(farenheit[:-2]) - 32) * 5.0/9.0)) + '°C'\n\n# Getting sunrise and sunset\ncityName = 'Columbus'\na = astral.Astral()\na.solar_depression = 'civil'\ncity = a[cityName]\n\nsunrise = time.mktime(city.sun()['sunrise'].timetuple())\nsunset = time.mktime(city.sun()['sunset'].timetuple())\nnow = time.time()\n\n# Weather icons\nawesomeIcons = {'cloud':               '\\uf0c2',\n                'cloud-download-alt':  '\\uf381',\n                'cloud-meatball':      '\\uf73b',\n                'cloud-moon':          '\\uf6c3',\n                'cloud-moon-rain':     '\\uf73c',\n                'cloud-rain':          '\\uf73d',\n                'cloud-showers-heavy': '\\uf740',\n                'cloud-sun':           '\\uf6c4',\n                'cloud-sun-rain':      '\\uf743',\n                'cloud-upload-alt':    '\\uf382',\n                'sun':                 '\\uf185',\n                'moon':                '\\uf186',\n                'wind':                '\\uf72e',\n                'snowflake':           '\\uf2dc'}\n\nicons = {'Cloudy': awesomeIcons['cloud'],\n         'Partly cloudy': awesomeIcons['cloud-sun'],\n         'Overcast': awesomeIcons['cloud'],\n         'Clear': awesomeIcons['sun'],\n         'Sunny': awesomeIcons['sun'],\n         'Patchy rain possible': 'PATCHY RAIN POSSIBLE',\n         'Moderate rain': 'MODERATE RAIN',\n         'Light Rain, Mist': awesomeIcons['cloud-rain']}\n\nif condition in icons.keys():\n    if not(now > sunrise and now < sunset):\n        if icons[condition] == awesomeIcons['sun']:\n            print(awesomeIcons['moon'] + ' {} ({})'.format(celsius, farenheit))\n        elif icons[condition] == awesomeIcons['cloud-sun']:\n            print(awesomeIcons['cloud-moon'] + ' {} ({})'.format(celsius, farenheit))\n        elif icons[condition] == awesomeIcons['cloud-sun-rain']:\n            print(awesomeIcons['cloud-moon-rain'] + ' {} ({})'.format(celsius, farenheit))\n    else:\n        print(icons[condition] + ' {} ({})'.format(celsius, farenheit))\nelse:\n    print(condition + ' ICON NOT FOUND {} ({})'.format(celsius, farenheit))\n", "sub_path": "JARVIS-Blue/i3blocks/scripts/weather.py", "file_name": "weather.py", "file_ext": "py", "file_size_in_byte": 2548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "subprocess.Popen", "line_number": 8, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 8, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 8, "usage_type": "name"}, {"api_name": "astral.Astral", "line_number": 22, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 26, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "447354837", "text": "import numpy as np\nfrom scipy.ndimage import zoom\nfrom enum import Enum\nimport os\nimport torch\nimport random\n\nuse_cuda = torch.cuda.is_available()\nTENSOR_TYPE = dict(f_tensor=torch.cuda.FloatTensor if use_cuda else torch.FloatTensor,\n                   i_tensor=torch.cuda.LongTensor if use_cuda else torch.LongTensor,\n                   u_tensor=torch.cuda.ByteTensor if use_cuda else torch.ByteTensor)\n\nclass EpisodeStats(object):\n    def __init__(self, episode_lengths, episode_rewards):\n        self.episode_lengths = episode_lengths\n        self.episode_rewards = episode_rewards\n\nclass ExperienceBuffer():\n    def __init__(self, buffer_size=10000):\n        '''\n        store a history of experiences that can be randomly drawn from when training the network. We can draw form the\n        previous past experiment to learn\n        :param buffer_size: size of the buffer\n        '''\n        self.buffer = []\n        self.buffer_size = buffer_size\n\n    def add(self, experience):\n        if len(list(self.buffer)) + len(list(experience)) >= self.buffer_size:\n            self.buffer[0:(len(list(experience)) + len(list(self.buffer))) - self.buffer_size] = []\n        self.buffer.extend([experience])\n\n    def sample(self, size):\n        samples = (random.sample(self.buffer, size))\n        state_batch, action_batch, reward_batch, next_state_batch, done_batch = tuple(zip(*samples))\n        return torch.cat(state_batch).type(TENSOR_TYPE[\"f_tensor\"]), torch.cat(action_batch).type(TENSOR_TYPE[\"i_tensor\"]), torch.cat(TENSOR_TYPE[\"f_tensor\"]([reward_batch])), \\\n               torch.cat(next_state_batch).type(TENSOR_TYPE[\"f_tensor\"]), torch.cat(TENSOR_TYPE[\"f_tensor\"]([done_batch]))\n\n\n\n\nclass NetworkType(Enum):\n    TARGET = 1\n    Q = 2\n\ndef img_rgb2gray(img):\n    \"\"\"\n    convert rgb images to gray scale\n    :param img: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`.\n    :return:\n    \"\"\"\n    image_shape = img.shape\n\n    if len(image_shape) == 3:\n        crop_img = np.dot(img[..., :3], [0.299, 0.587, 0.114]).astype(np.uint8)\n    else:\n        crop_img = np.dot(img[:, ..., :3], [0.299, 0.587, 0.114]).astype(np.uint8)\n    return crop_img\n\n\ndef img_crop_to_bounding_box(img, offset_height, offset_width, target_height, target_width):\n    \"\"\"\n    :param img:4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`.\n    :param offset_height: Vertical coordinate of the top-left corner of the result in the input.\n    :param offset_width: Horizontal coordinate of the top-left corner of the result in the input.\n    :param target_height: Height of the result.\n    :param target_width:Width of the result.\n    :return:\n    \"\"\"\n    image_shape = img.shape\n    if len(image_shape) == 2:\n        return img[offset_height:offset_height + target_height, offset_width:target_width]\n    else:\n        return img[:, offset_height:offset_height + target_height, offset_width:target_width]\n\ndef img_resize(img, resize_factor, order=0):\n    \"\"\"\n    resize a given image\n    :param img: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`.\n    :param resize_factor: float or array for each axis\n    :return:\n    \"\"\"\n    return zoom(img, zoom=resize_factor, order=order)\n\n\ndef state_processor(state, offset_height=34, offset_width=0, target_height=160, 
target_width=160):\n    \"\"\"\n    Processes a raw Atari iamges. Resizes it and converts it to grayscale.\n    No needed to make it batched because we process one frame at a time, while the network is trained in  batch trough \n    experience replay\n    :param state: A [210, 160, 3] Atari RGB State\n    :return: A processed [84, 84, 1] state representing grayscale values.\n    \"\"\"\n    img_size = state.shape\n    assert img_size[0] == 210\n    assert img_size[1] == 160\n    assert img_size[2] == 3\n\n\n    image = img_rgb2gray(state)      # convert to rgb\n    image = img_crop_to_bounding_box(image, 34, 0, 160, 160)\n    image = img_resize(image, [0.525, 0.525])                # check aspect ration, otherwise convolution dim would not work\n    return torch.from_numpy(image/255.).type(torch.FloatTensor).unsqueeze(dim=0)\n\ndef ensure_dir(file_path):\n    '''\n    Used to ensure to create the a directory when needed\n    :param file_path: path to the file that we want to create\n    '''\n    directory = os.path.dirname(file_path)\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n    return file_path\n", "sub_path": "RL/DeepQLearning/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 4517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.cuda.is_available", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.ByteTensor", "line_number": 11, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 37, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 57, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.zoom", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 112, "usage_type": "call"}]}
+{"seq_id": "358376076", "text": "from utils import random_string\n\n\nclass Card:\n    def __init__(self, name, attack, defense, cost, special=None):\n        self.id = random_string(20)\n        self.attack = attack\n        self.defense = defense\n        self.cost = cost\n        self.name = name\n\n        if special is None:\n            self.special = special\n        else:\n            self.special = {}\n\n    def get_state(self):\n        return {\n            \"id\": self.id,\n            \"attack\": self.attack,\n            \"defense\": self.defense,\n            \"cost\": self.cost,\n            \"name\": self.name\n        }\n", "sub_path": "server/data/card.py", "file_name": "card.py", "file_ext": "py", "file_size_in_byte": 580, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "utils.random_string", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "364219986", "text": "\"\"\"\nSpeech to text library (Italian or English)\nPrincipal functions:\n- setLang (setLangIt() or setLangEn() default: It)\n- listen() : return a string (text recognized); end phrase detected by 0.5 second of silence\n- startcontinuelisten() : accumulate text detected in a queue; text can be extracted by getText function\n- getText() : return next text extracted from queue (or None)\n\"\"\"\n\n\nimport speech_recognition as sr\nimport threading  as thd\nimport time\nimport queue\n\nrecogn = sr.Recognizer()\n\naudioq = queue.Queue(10)\ntextq = queue.Queue(5)\n\nlistenrun = True\ndecoderun = True\ncircular = False    #no circular queue (missing last phrases if full)\n                    #if true: circular queue (missing oldest phrases if full)\n\nlang = 'it-IT'\n\ndef setLangIt():\n    global lang\n    lang = 'it-IT'\n\n\ndef setLangEn():\n    global lang\n    lang = 'en-GB'\n\ndef init():\n    for m in enumerate(sr.Microphone.list_microphone_names()):\n        print(m)\n    with sr.Microphone() as source:\n        try:\n            print(\"Using device num:\", source.device_index)\n            recogn.adjust_for_ambient_noise(source)\n            print(\"Adjustment done!\")\n        except:\n            print(\"No microphone found!\")\n\ndef listen(timeout = None, pause = 0.5, level = 1000):\n    with sr.Microphone() as source:\n        recogn.energy_threshold = level\n        recogn.pause_threshold = pause\n        text=\"\"\n        okcode= 0\n        try:\n            audio = recogn.listen(source, timeout)\n            text = recogn.recognize_google(audio, language=lang)\n        except sr.WaitTimeoutError:\n            okcode= -1\n        except sr.UnknownValueError:\n            okcode= -2\n        except sr.RequestError:\n            okcode= -3\n        finally:\n            return text, okcode\n\n\n####################################\n\ndef putAudio(audio):\n    try:\n        audioq.put_nowait(audio)\n    except queue.Full:\n        if circular:\n            audioq.get_nowait()\n            audioq.put_nowait(audio)\n        return\n\ndef getAudio():\n    try:\n        audio = audioq.get_nowait()\n        return audio\n    except:\n        return None\n\ndef putText(txt):\n    try:\n        textq.put_nowait(txt)\n    except queue.Full:\n        if circular:\n            textq.get_nowait()\n            textq.put_nowait(txt)\n        return\n\n\n\ndef listencontinue():\n    global  recogn, listenrun, circular\n    recogn.pause_threshold = 0.5\n    timeout = 2\n    print(\"Start listening...\")\n    while listenrun:\n        try:\n            with sr.Microphone() as source:\n                recogn.energy_threshold = 2000\n                #recogn.adjust_for_ambient_noise(source, 1)\n                audio = recogn.listen(source)\n                putAudio(audio)\n                #print(audioq.qsize())\n        except:\n            time.sleep(0.1)\n            continue\n\n\ndef decodecontinue():\n    global  recogn, decoderun, circular\n    while decoderun:\n        audio = getAudio()\n        if audio is not None:\n            try:\n                text = recogn.recognize_google(audio, language=lang)\n                putText(text)\n                #print(textq.qsize())\n            except:\n                time.sleep(0.1)\n                continue\n\n\n#################################\n\ndef startContinuousListen(circularqueue = False):\n    global listenrun, decoderun, circular\n    circular = circularqueue\n    listenrun = True\n    decoderun = True\n 
   tha = thd.Thread(name=\"Listen\", target=listencontinue)\n    thb = thd.Thread(name=\"Recogn\", target=decodecontinue)\n    tha.start()\n    thb.start()\n\ndef stopContinuousListen():\n    lock = thd.Lock()\n    lock.acquire()\n    global listenrun, decoderun\n    listenrun = False\n    decoderun = False\n    lock.release()\n\ndef getText():\n    try:\n        text = textq.get_nowait()\n        return text\n    except:\n        return None\n\n\n\n", "sub_path": "TTS_STT/SpeechToTextLib.py", "file_name": "SpeechToTextLib.py", "file_ext": "py", "file_size_in_byte": 3799, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 16, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 18, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 19, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone.list_microphone_names", "line_number": 38, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 38, "usage_type": "attribute"}, {"api_name": "speech_recognition.Microphone", "line_number": 40, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 49, "usage_type": "call"}, {"api_name": "speech_recognition.WaitTimeoutError", "line_number": 57, "usage_type": "attribute"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 59, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 61, "usage_type": "attribute"}, {"api_name": "queue.Full", "line_number": 72, "usage_type": "attribute"}, {"api_name": "queue.Full", "line_number": 88, "usage_type": "attribute"}, {"api_name": "speech_recognition.Microphone", "line_number": 103, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 110, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 124, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 135, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 136, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "50294645", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport copy\nimport json\nimport numpy as np\nimport inspect\nimport sys\n\nimport nussl.audio_signal as audio_signal\nimport nussl.spectral_utils as spectral_utils\nimport nussl.constants as constants\nimport nussl.utils as utils\n\nif sys.version_info.major == 3:\n    basestring, unicode = str, str\n\n\nclass SeparationBase(object):\n    \"\"\"Base class for all separation algorithms in nussl.\n\n    Do not call this. It will not do anything.\n\n    Parameters:\n        input_audio_signal (:obj:`AudioSignal`). ``AudioSignal`` object.\n                            This will always make a copy of the provided AudioSignal object.\n    \"\"\"\n\n    def __init__(self, input_audio_signal):\n        self._audio_signal = None\n\n        if input_audio_signal is not None:\n            self.audio_signal = input_audio_signal\n        else:\n            self.audio_signal = audio_signal.AudioSignal()\n\n    @property\n    def sample_rate(self):\n        \"\"\"(int): Sample rate of ``self.audio_signal``.\n        Literally ``self.audio_signal.sample_rate``.\n        \"\"\"\n        return self.audio_signal.sample_rate\n\n    @property\n    def stft_params(self):\n        \"\"\"(:obj:`StftParams`): ``StftParams`` of ``self.audio_signal``\n        Literally ``self.audio_signal.stft_params``.\n        \"\"\"\n        return self.audio_signal.stft_params\n\n    @property\n    def audio_signal(self):\n        \"\"\"(:obj:`AudioSignal`): Copy of the ``AudioSignal`` object passed in upon initialization.\n        \"\"\"\n        return self._audio_signal\n\n    @audio_signal.setter\n    def audio_signal(self, input_audio_signal):\n        self._audio_signal = copy.copy(input_audio_signal)\n\n    def plot(self, output_name, **kwargs):\n        \"\"\"Plots relevant data for separation algorithm\n\n        Raises:\n            NotImplementedError: Cannot call base class\n        \"\"\"\n        raise NotImplementedError('Cannot call base class.')\n\n    def run(self):\n        \"\"\"Runs separation algorithm\n\n        Raises:\n            NotImplementedError: Cannot call base class\n        \"\"\"\n        raise NotImplementedError('Cannot call base class.')\n\n    def make_audio_signals(self):\n        \"\"\"Makes ``AudioSignal`` objects after separation algorithm is run\n\n        Raises:\n            NotImplementedError: Cannot call base class\n        \"\"\"\n        raise NotImplementedError('Cannot call base class.')\n\n    def to_json(self):\n        return json.dumps(self, default=SeparationBase._to_json_helper)\n\n    @staticmethod\n    def _to_json_helper(o):\n        if not isinstance(o, SeparationBase):\n            raise TypeError\n\n        d = copy.copy(o.__dict__)\n        for k, v in d.items():\n            if isinstance(v, np.ndarray):\n                d[k] = utils.json_ready_numpy_array(v)\n            if isinstance(v, audio_signal.AudioSignal) or isinstance(v, spectral_utils.StftParams):\n                d[k] = v.to_json()\n\n        d['__class__'] = o.__class__.__name__\n        d['__module__'] = o.__module__\n        if 'self' in d:\n            del d['self']\n\n        return d\n\n    @classmethod\n    def from_json(cls, json_string):\n        sep_decoder = SeparationBaseDecoder(cls)\n        return sep_decoder.decode(json_string)\n\n    def __call__(self):\n        self.run()\n\n    def __eq__(self, other):\n        for k, v in self.__dict__.items():\n            if isinstance(v, np.ndarray):\n             
   if not np.array_equal(v, other.__dict__[k]):\n                    return False\n            elif k == 'self':\n                pass\n            elif v != other.__dict__[k]:\n                return False\n        return True\n\n    def __ne__(self, other):\n        return not self == other\n\n\nclass SeparationBaseDecoder(json.JSONDecoder):\n    \"\"\" Object to decode a ``SeparationBase``-derived object from JSON serialization.\n    You should never have to instantiate this object by hand.\n    \"\"\"\n\n    def __init__(self, separation_class):\n        self.separation_class = separation_class\n        json.JSONDecoder.__init__(self, object_hook=self.json_separation_decoder)\n\n    def json_separation_decoder(self, json_dict):\n        if '__class__' in json_dict:\n            class_name = json_dict.pop('__class__')\n            module_name = json_dict.pop('__module__')\n            if class_name != self.separation_class.__name__ or module_name != self.separation_class.__module__:\n                raise TypeError\n\n            # load the module and import the class\n            module = __import__(module_name)\n            class_ = getattr(module, class_name)\n\n            # we know 'input_audio_signal' is always the first argument\n            signal_json = json_dict.pop('_audio_signal')  # this is the AudioSignal object\n            signal = audio_signal.AudioSignal.from_json(signal_json)\n\n            # get the rest of the required arguments\n            signature = inspect.getargspec(class_.__init__)\n            # first arg is covered above (2), and we don't want the non-defaults (-len(signature.defaults))\n            required_args = signature.args[2:-len(signature.defaults)]\n            args = dict((k.encode('ascii'), json_dict[k]) for k in required_args)\n\n            # make a new instance of separation class\n            seperator = class_(signal, **args)\n\n            # fill out the rest of the fields\n            for k, v in json_dict.items():\n                if isinstance(v, dict) and constants.NUMPY_JSON_KEY in v:\n                    seperator.__dict__[k] = utils.json_numpy_obj_hook(v[constants.NUMPY_JSON_KEY])\n                elif isinstance(v, basestring) and audio_signal.__name__ in v:  # TODO: python3-ify this\n                    seperator.__dict__[k] = audio_signal.AudioSignal.from_json(v)\n                else:\n                    seperator.__dict__[k] = v if not isinstance(v, unicode) else v.encode('ascii')\n\n            return seperator\n        else:\n            return json_dict\n", "sub_path": "nussl/separation_base.py", "file_name": "separation_base.py", "file_ext": "py", "file_size_in_byte": 5865, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.version_info", "line_number": 15, "usage_type": "attribute"}, {"api_name": "nussl.audio_signal.AudioSignal", "line_number": 35, "usage_type": "call"}, {"api_name": "nussl.audio_signal", "line_number": 35, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 59, "usage_type": "call"}, {"api_name": "nussl.audio_signal.setter", "line_number": 57, "usage_type": "attribute"}, {"api_name": "nussl.audio_signal", "line_number": 57, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 95, "usage_type": "attribute"}, {"api_name": "nussl.utils.json_ready_numpy_array", 
"line_number": 96, "usage_type": "call"}, {"api_name": "nussl.utils", "line_number": 96, "usage_type": "name"}, {"api_name": "nussl.audio_signal.AudioSignal", "line_number": 97, "usage_type": "attribute"}, {"api_name": "nussl.audio_signal", "line_number": 97, "usage_type": "name"}, {"api_name": "nussl.spectral_utils.StftParams", "line_number": 97, "usage_type": "attribute"}, {"api_name": "nussl.spectral_utils", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 118, "usage_type": "call"}, {"api_name": "json.JSONDecoder", "line_number": 130, "usage_type": "attribute"}, {"api_name": "json.JSONDecoder.__init__", "line_number": 137, "usage_type": "call"}, {"api_name": "json.JSONDecoder", "line_number": 137, "usage_type": "attribute"}, {"api_name": "nussl.audio_signal.AudioSignal.from_json", "line_number": 152, "usage_type": "call"}, {"api_name": "nussl.audio_signal.AudioSignal", "line_number": 152, "usage_type": "attribute"}, {"api_name": "nussl.audio_signal", "line_number": 152, "usage_type": "name"}, {"api_name": "inspect.getargspec", "line_number": 155, "usage_type": "call"}, {"api_name": "nussl.constants.NUMPY_JSON_KEY", "line_number": 165, "usage_type": "attribute"}, {"api_name": "nussl.constants", "line_number": 165, "usage_type": "name"}, {"api_name": "nussl.utils.json_numpy_obj_hook", "line_number": 166, "usage_type": "call"}, {"api_name": "nussl.utils", "line_number": 166, "usage_type": "name"}, {"api_name": "nussl.constants.NUMPY_JSON_KEY", "line_number": 166, "usage_type": "attribute"}, {"api_name": "nussl.constants", "line_number": 166, "usage_type": "name"}, {"api_name": "nussl.audio_signal.__name__", "line_number": 167, "usage_type": "attribute"}, {"api_name": "nussl.audio_signal", "line_number": 167, "usage_type": "name"}, {"api_name": "nussl.audio_signal.AudioSignal.from_json", "line_number": 168, "usage_type": "call"}, {"api_name": "nussl.audio_signal.AudioSignal", "line_number": 168, "usage_type": "attribute"}, {"api_name": "nussl.audio_signal", "line_number": 168, "usage_type": "name"}]}
+{"seq_id": "606084071", "text": "\n\nimport logging; log = logging.getLogger(__name__)\nDEBUG = log.debug; INFO = log.info; WARN = log.warning; ERROR = log.error\n\n\nclass BaseView(object):\n\n    def __init__(self, context, request):\n        self.context = context\n        self.request = request\n\n    def __call__(self):\n\n        method = self.request.method.lower()\n        f = getattr(self, method, None)\n        if f:\n            return f()\n\n        WARN(\"no method %s on %s\" % (method, self))\n        raise HTTPMethodNotAllowed\n\n    # allow CORS pre-flight, headers are added in NewResponse event handler\n    def options(self):\n        return Response()\n", "sub_path": "crowdpong_api/store.py", "file_name": "store.py", "file_ext": "py", "file_size_in_byte": 619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 3, "usage_type": "call"}]}
+{"seq_id": "586228376", "text": "#!/usr/bin/env python3\n\"\"\"\nYou are given a Double Link List with one pointer of each node pointing to the next node just\nlike in a single link list. The second pointer however CAN point to any node in the list and\nnot just the previous node. \n\nNow write a program in O(n) time to duplicate (clone) this list.\nThat is, write a program which will create a copy of this list.\nAll of those random pointers need to point to the same corresponing\nnodes in the new list.\n\nEXAMPLES:\n   Each node has 'val', 'next', and 'random' attributes.\n   Only the [val, random] will be shown in the example:\n\n   Input 1 = (7,None) -> (13,0) -> (11,4) -> (10,2) -> (1,0) -> None\n\n   Input 2 = (1,1) -> (2,1) -> None\n\nREF:\n  - https://leetcode.com/problems/copy-list-with-random-pointer/ (Medium)\n  - https://www.geeksforgeeks.org/a-linked-list-with-next-and-arbit-pointer/\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Node:\n    def __init__(self, x:int, next:'Node'=None, random:'Node'=None):\n        self.val = int(x)\n        self.next = next\n        self.random = random\n\n\nclass Solution:\n\n    def copyRandomList_v1(self, head: 'Node') -> 'Node':\n        \"\"\"Dictionary & Array.\n\n        Helper data structures:\n        - Dictionary: {original_node: index}\n        - Array: [new_node]\n\n        Then, origina_node -> original_random_node -> index -> new_random_node\n\n        Use 3 x O(n) extra space.\n        LeatCode: 36ms, 14.4 MB, beats 64%.\n        \"\"\"\n        new_nodes = list()\n        prehead = Node(0)\n\n        # Convert random pointers to indexes.\n        index_map = dict()\n        p = head\n        q = prehead\n        i = 0\n        while p:\n            # Update index map\n            index_map[p] = i\n            i += 1\n\n            # Clone node with value & keep a copy in new_nodes\n            q.next = q = Node(p.val)\n            new_nodes.append(q)\n            p = p.next\n\n        # Update the random pointer\n        p = head\n        q = prehead.next\n        while (p and q):\n            # Get random index\n            r = p.random\n            if r is not None:\n                i = index_map[r]\n                q.random = new_nodes[i]\n            p = p.next\n            q = q.next\n\n        return prehead.next\n\n    def copyRandomList_v2(self, head: 'Node') -> 'Node':\n        \"\"\"Dictionary.\n\n        Use a dictionay to connect original list with the new list.\n        It doesn't need to know the index position of the random link.\n        This may not be faster than v1.\n\n        Use 2 x O(n) extra space.\n        LeatCode: 32 ms, 14.4 MB, beats 86.78%.\n        \"\"\"\n        prehead = Node(0)\n\n        # Convert random pointers to indexes.\n        node_map = dict()\n        p = head\n        q = prehead\n        while p:\n            # Here we copy both the value and the random pointer\n            q.next = q = Node(p.val, None, p.random)\n            node_map[p] = q\n            p = p.next\n\n        # Update the random pointer\n        q = prehead.next\n        while q:\n            # This is the random pointer from the original list\n            r = q.random\n            if r is not None:\n                # Use the dictionary to get the corresponding node\n                q.random = node_map[r]\n            q = q.next\n\n        return prehead.next\n\n# ----------------\n#   Main\n# ----------------\ndef make_list(a: List) -> 'Node':\n    \"\"\"Create\n    :param a: a list of (val,random) pairs\n    
\"\"\"\n    p = prehead = Node(0)\n    # Build the single list and keep them in Python list\n    tmp_list = list()\n    for x in a:\n        p.next = p = Node(x[0])\n        tmp_list.append(p)\n\n    # Handle the random attribute\n    p = prehead.next\n    n = len(tmp_list)\n    for x in a:\n        i = x[1]\n        if i is not None and i < n:\n            p.random = tmp_list[i]\n        else:\n            p.random = None\n        p = p.next\n\n    return prehead.next\n\n\ndef get_val_random_list(p: Node) -> str:\n    \"\"\"Convert a ListNode to a string.\"\"\"\n\n    head = p\n    # Store the list in a dictionary\n    index_map = dict()\n    i = 0\n    while p:\n        index_map[p] = i\n        i += 1\n        p = p.next\n        \n    # Build a list of (val, random_index)\n    retvals = list()\n    p = head\n    while (p):\n        val = p.val\n        r = p.random\n        random_index = index_map[r] if (r in index_map) else None\n        retvals.append([val, random_index])\n        p = p.next\n\n    return retvals\n\n\ndef main():\n    \"\"\"Main function\"\"\"\n\n    test_data = [\n        [[7, None], [13, 0], [11, 4], [10, 2], [1, 0]],\n        [[1, 1], [2, 1]],\n        [[3, None], [3, 0], [3, None]],\n        [],\n    ]\n\n    sol = Solution()\n    for x in test_data:\n        print(\"# Input =\", x)\n        head = make_list(x)\n        print(\"  1> =\", get_val_random_list(sol.copyRandomList_v1(head)))\n        print(\"  2> =\", get_val_random_list(sol.copyRandomList_v2(head)))\n\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "python3/linked_list/copy_list_with_random_pointer.py", "file_name": "copy_list_with_random_pointer.py", "file_ext": "py", "file_size_in_byte": 4846, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "typing.List", "line_number": 119, "usage_type": "name"}]}
+{"seq_id": "162976611", "text": "from tkinter import *\r\nfrom PIL import Image,ImageTk\r\n\r\nwindow = Tk()\r\n\r\nwindow.title('Smart Car Parking System')\r\nwindow.iconbitmap(r'C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\car.ico')\r\nwindow.configure(background = \"black\")\r\n\r\nheading = Label(window, text = 'Smart Car Parking System', bg = \"black\", fg = \"white\",font = \"Perpetua 24 bold\")\r\nheading.grid(row = 0,column = 0)\r\n\r\nframe = LabelFrame(window,padx = 100, pady = 50)\r\nframe.grid(padx = 10, pady = 10)\r\nframe.configure(background = \"white\")\r\n\r\ndeathwing = Image.open(r'C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\smart.png')\r\nimage = deathwing.resize((455,307),Image.ANTIALIAS)\r\nDeathwing = ImageTk.PhotoImage(image)\r\nimagelabel = Label(frame,image = Deathwing,borderwidth = 0)\r\nimagelabel.grid(row = 0,column = 0)\r\n\r\n\r\ndef click_start():\r\n    window.destroy()\r\n    \r\n    #second page starts here \r\n    \r\n    root = Tk()\r\n    root.title('User')\r\n    root.iconbitmap(r'C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Final-Code\\Resources\\car.ico')\r\n    root.configure(background = \"black\")\r\n\r\n    new_frame = LabelFrame(root,padx = 100, pady = 80)\r\n    new_frame.grid(padx = 15, pady = 15)\r\n    new_frame.configure(background = \"white\")\r\n    \r\n    print(\"This has been passsed\")\r\n    \r\n    def click_new_user():\r\n        root.destroy()\r\n        \r\n        #third page.new user starts here\r\n        \r\n        root1 = Tk()\r\n\r\n        root1.title('New User')\r\n        root1.iconbitmap(r'C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\car.ico')\r\n        root1.configure(background = \"black\")\r\n\r\n        frame = LabelFrame(root1, text = 'Sign-In',padx = 100, pady = 120,font = \"Perpetua 15 \")\r\n        frame.grid(padx = 15, pady = 15)\r\n        frame.configure(background = \"white\")\r\n\r\n        labelName = Label(frame,text = \"Enter your Name : \",bg = \"white\",pady = 5,font = \"Perpetua 15 \")\r\n        labelName.grid(row = 0,column = 0,sticky = 'w')\r\n        labelPhone = Label(frame, text = \"Enter your Phone Number : \",bg = \"white\",pady = 5,font = \"Perpetua 15 \")\r\n        labelPhone.grid(row = 5,column = 0,sticky = 'w')\r\n        labelEmailID = Label(frame,text = \"Enter your Email ID : \",bg = \"white\",pady = 5,font = \"Perpetua 15 \")\r\n        labelEmailID.grid(row = 10,column = 0,sticky = 'w')\r\n        labelVehicleNumber = Label(frame,text = \"Enter your Vehicle Number : \",bg = \"white\",pady = 5,font = \"Perpetua 15 \")\r\n        labelVehicleNumber.grid(row = 15,column = 0,sticky = 'w')\r\n\r\n        entryName = Entry(frame, width = 30,fg = \"black\",bg = \"#EFE4E4\")\r\n        entryName.grid(row = 0,column = 5,sticky = 'w')\r\n\r\n        entryPhone = Entry(frame,width = 30, fg = \"black\",bg = \"#EFE4E4\")\r\n        entryPhone.grid(row = 5,column = 5,sticky = 'w')\r\n\r\n        entryEmailID = Entry(frame,width = 30, fg = \"black\",bg = \"#EFE4E4\")\r\n        entryEmailID.grid(row = 10,column = 5,sticky = 'w')\r\n\r\n        entryVehicleNumber = Entry(frame,width = 30,fg = \"black\",bg = \"#EFE4E4\")\r\n        entryVehicleNumber.grid(row = 15,column = 5,sticky = 'w')\r\n\r\n        def click_next():\r\n            label = Label(frame,text = 'All the Fields are required for successfull Login !',fg = 'red',bg = \"white\")\r\n            label.grid(row = 20,column = 5)\r\n            name = 
entryName.get()\r\n\r\n            entryName.delete(0,END)\r\n            entryPhone.delete(0,END)\r\n            entryEmailID.delete(0,END)\r\n            entryVehicleNumber.delete(0,END)\r\n\r\n            root2 = Tk()\r\n            root2.title('Camera')\r\n            root2.iconbitmap(r'C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\car.ico')\r\n            root2.geometry('400x400')\r\n            root2.configure(background = \"white\")\r\n\r\n            details = Label(root2, text = \"Hello \" + name,fg = 'red',bg = \"white\")\r\n            details.grid(row = 55,column = 5)\r\n            \r\n            labelName = Label(root2,text = \"Camera takes Picture\")\r\n            labelName.grid(row = 5,column = 0)\r\n\r\n        def click_clear():\r\n            entryName.delete(0,END)\r\n            entryPhone.delete(0,END)\r\n            entryEmailID.delete(0,END)\r\n            entryVehicleNumber.delete(0,END)\r\n            \r\n        print(\"2nd Pass\")\r\n        next_button_image = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\next.png\")\r\n        nextButton = Button(frame,image = next_button_image, command = click_next, height = 140, width = 118, bg = \"white\",borderwidth = 0)\r\n        nextButton.image = next_button_image\r\n        nextButton.grid(row = 25,column = 2)\r\n\r\n        clearbtn = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\trash.png\")\r\n        btn = Button(frame,image = clearbtn,command = click_clear ,height = 100, width = 114, bg = \"white\",borderwidth = 0)\r\n        btn.image = clearbtn\r\n        btn.grid(row = 25,column = 0)\r\n        exit_img = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\exit.png\")\r\n        exit_button = Button(root1,text = 'Exit', command = root1.quit,image = exit_img,height = 50,bg = \"white\", width = 50,borderwidth = 0)\r\n        exit_button.image = exit_img\r\n        exit_button.place(x = 18,y = 500)\r\n       \r\n        #third page.new user ends here\r\n    \r\n\r\n    def click_existing_user():\r\n        root.destroy()\r\n        \r\n        #third page.existing user ends here\r\n        \r\n        root3 = Tk() \r\n        root3.title('Existing User')\r\n        root3.iconbitmap(r'C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\car.ico')\r\n        root3.configure(background = \"black\")\r\n\r\n        frame = LabelFrame(root3,padx = 70, pady = 100,font = \"Perpetua 15 \")\r\n        frame.grid(padx = 15, pady = 15)\r\n        frame.configure(background = \"white\")\r\n\r\n        def click_pick_up():\r\n            root4 = Tk()\r\n            root4.title('User')\r\n            root4.iconbitmap(r'C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\car.ico')\r\n            clearlabel = Label(root4,text = \"Camera takes Picture\")\r\n            clearlabel.pack()\r\n\r\n        pick_up_image = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\pickup.png\")\r\n        pickup_button = Button(frame,image =  pick_up_image,command = click_pick_up ,height = 230, width = 290, bg = \"white\",borderwidth=0)\r\n        pickup_button.image = pick_up_image\r\n        pickup_button.grid(row = 0,column = 0)\r\n\r\n        blank_label = Label(frame,text = \"                  \",bg = \"white\")\r\n        blank_label.grid(row = 0,column = 1)\r\n\r\n        drop_off_image = PhotoImage(file = r\"C:\\Users\\LENOVO 
OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\dropoff.png\")\r\n        dropoff_button = Button(frame,image = drop_off_image,command = open ,height = 215, width = 295, bg = \"white\",borderwidth=0)\r\n        dropoff_button.image = drop_off_image\r\n        dropoff_button.grid(row = 0, column = 2)\r\n\r\n        exit_img = PhotoImage(file = \"exit.png\")\r\n        exit_button = Button(root3,image = exit_img,command = root3.quit ,height = 50 , width = 50, bg = \"white\",borderwidth = 0)\r\n        exit_button.image = exit_img\r\n        exit_button.place(x = 18,y = 350)\r\n        #third page.existing user ends here\r\n\r\n  \r\n    \r\n    new_user_img = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\new-user.png\")    \r\n    new_user_button = Button(new_frame, text = \"New User\",image = new_user_img , command = click_new_user,compound = TOP,borderwidth = 0,bg = \"white\",font = \"Perpetua 15 \")\r\n    new_user_button.image = new_user_img\r\n    new_user_button.grid(row = 0,column = 0)\r\n\r\n    blank_label = Label(new_frame,text = \"             \",bg = \"white\")\r\n    blank_label.grid(row = 0,column = 1)\r\n\r\n    existing_user_img = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\face-detection.png\")\r\n    existing_user_button = Button(new_frame, text = \"Existing User\",image = existing_user_img,command = click_existing_user,compound = TOP,borderwidth = 0,bg = \"white\",font = \"Perpetua 15 \")\r\n    existing_user_button.image = existing_user_img\r\n    existing_user_button.grid(row = 0,column = 2)\r\n\r\n    exit_img = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\exit.png\")\r\n    exit_button = Button(root,image = exit_img,command = root.quit ,height = 50 , width = 50, bg = \"white\",borderwidth = 0)\r\n    exit_button.image = exit_img\r\n    exit_button.place(x=18,y=300) \r\n    #2nd page ends here\r\n    \r\n\r\n    \r\nstart_img = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\start.png\")\r\nstart_button = Button(frame,image = start_img, command = click_start, height = 120, width = 190, bg = \"white\",borderwidth = 0)\r\nstart_button.image = start_img\r\nstart_button.grid(row = 1,column = 0)\r\n\r\nexit_img = PhotoImage(file = r\"C:\\Users\\LENOVO OFFICIAL\\Desktop\\Mini-Project-III\\Resources\\exit.png\")\r\nexit_button = Button(window,image = exit_img,command = window.quit ,height = 50 , width = 50, bg = \"white\",borderwidth = 0)\r\nexit_button.image = exit_img\r\nexit_button.place(x = 15,y = 510)\r\n\r\nwindow.mainloop()\r\n", "sub_path": "CopyOfGUI/final.py", "file_name": "final.py", "file_ext": "py", "file_size_in_byte": 9070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "PIL.Image.open", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 19, "usage_type": "name"}]}
+{"seq_id": "569453614", "text": "\"\"\"empty message\n\nRevision ID: ff2919be654e\nRevises: e8cad27e9a52\nCreate Date: 2019-11-19 04:45:47.171808\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'ff2919be654e'\ndown_revision = 'e8cad27e9a52'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_unique_constraint(None, 'class17', ['class_num'])\n    op.add_column('s_t', sa.Column('student_id', sa.Integer(), nullable=False))\n    op.drop_constraint('s_t_ibfk_1', 's_t', type_='foreignkey')\n    op.create_foreign_key(None, 's_t', 'student', ['student_id'], ['id'])\n    op.drop_column('s_t', 'class17_id')\n    op.drop_constraint('teacher_ibfk_1', 'teacher', type_='foreignkey')\n    op.drop_column('teacher', 'class_id')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('teacher', sa.Column('class_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n    op.create_foreign_key('teacher_ibfk_1', 'teacher', 'class17', ['class_id'], ['id'])\n    op.add_column('s_t', sa.Column('class17_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n    op.drop_constraint(None, 's_t', type_='foreignkey')\n    op.create_foreign_key('s_t_ibfk_1', 's_t', 'class17', ['class17_id'], ['id'])\n    op.drop_column('s_t', 'student_id')\n    op.drop_constraint(None, 'class17', type_='unique')\n    # ### end Alembic commands ###\n", "sub_path": "flask_admin_test/migrations/versions/ff2919be654e_.py", "file_name": "ff2919be654e_.py", "file_ext": "py", "file_size_in_byte": 1570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "alembic.op.create_unique_constraint", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.drop_constraint", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.INTEGER", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 33, "usage_type": "name"}, {"api_name": 
"alembic.op.create_foreign_key", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.INTEGER", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 35, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 36, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}]}
+{"seq_id": "194713229", "text": "import pandas as pd\n\nimport glog\nimport multiprocessing as mp\n\nfrom recoder.model import Recoder\nfrom recoder.data import RecommendationDataset\nfrom recoder.metrics import AveragePrecision, Recall, NDCG\nfrom recoder.nn import DynamicAutoencoder, MatrixFactorization\nfrom recoder.utils import dataframe_to_csr_matrix\n\n\ndata_dir = 'data/msd/'\nmodel_dir = 'models/msd/'\n\ncommon_params = {\n  'user_col': 'uid',\n  'item_col': 'sid',\n  'inter_col': 'listen',\n}\n\nglog.info('Loading Data...')\n\ntrain_df = pd.read_csv(data_dir + 'train.csv')\nval_tr_df = pd.read_csv(data_dir + 'validation_tr.csv')\nval_te_df = pd.read_csv(data_dir + 'validation_te.csv')\n\n# uncomment it to train with MatrixFactorization\n# train_df = train_df.append(val_tr_df)\n\ntrain_matrix, item_id_map, _ = dataframe_to_csr_matrix(train_df, **common_params)\nval_tr_matrix, _, user_id_map = dataframe_to_csr_matrix(val_tr_df, item_id_map=item_id_map,\n                                                        **common_params)\nval_te_matrix, _, _ = dataframe_to_csr_matrix(val_te_df, item_id_map=item_id_map,\n                                              user_id_map=user_id_map, **common_params)\n\ntrain_dataset = RecommendationDataset(train_matrix)\nval_tr_dataset = RecommendationDataset(val_tr_matrix, val_te_matrix)\n\nuse_cuda = True\n\nmodel = DynamicAutoencoder(hidden_layers=[200], activation_type='tanh',\n                           noise_prob=0.5, sparse=True)\n\n# model = MatrixFactorization(embedding_size=200, activation_type='tanh',\n#                             dropout_prob=0.5, sparse=False)\n\ntrainer = Recoder(model=model, use_cuda=use_cuda, optimizer_type='adam',\n                  loss='logistic', user_based=False)\n\n# trainer.init_from_model_file(model_dir + 'bce_ns_d_0.0_n_0.5_200_epoch_50.model')\nmodel_checkpoint = model_dir + 'bce_ns_d_0.0_n_0.5_200'\n\nmetrics = [Recall(k=20, normalize=True), Recall(k=50, normalize=True),\n           NDCG(k=100)]\n\ntry:\n  trainer.train(train_dataset=train_dataset, val_dataset=val_tr_dataset,\n                batch_size=500, lr=1e-3, weight_decay=2e-5,\n                num_epochs=100, negative_sampling=True,\n                lr_milestones=[60, 80],\n                num_data_workers=mp.cpu_count() if use_cuda else 0,\n                model_checkpoint_prefix=model_checkpoint,\n                checkpoint_freq=10, eval_num_recommendations=100,\n                metrics=metrics, eval_freq=10)\nexcept (KeyboardInterrupt, SystemExit):\n  trainer.save_state(model_checkpoint)\n  raise\n", "sub_path": "scripts/msd/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2483, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "glog.info", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "recoder.utils.dataframe_to_csr_matrix", "line_number": 31, "usage_type": "call"}, {"api_name": "recoder.utils.dataframe_to_csr_matrix", "line_number": 32, "usage_type": "call"}, {"api_name": "recoder.utils.dataframe_to_csr_matrix", "line_number": 34, "usage_type": "call"}, {"api_name": "recoder.data.RecommendationDataset", "line_number": 37, "usage_type": "call"}, {"api_name": "recoder.data.RecommendationDataset", "line_number": 38, 
"usage_type": "call"}, {"api_name": "recoder.nn.DynamicAutoencoder", "line_number": 42, "usage_type": "call"}, {"api_name": "recoder.model.Recoder", "line_number": 48, "usage_type": "call"}, {"api_name": "recoder.metrics.Recall", "line_number": 54, "usage_type": "call"}, {"api_name": "recoder.metrics.NDCG", "line_number": 55, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "490664505", "text": "import logging\nimport numpy as np\nfrom time import time\nimport utils as U\n\nlogging.basicConfig(\n    # filename='out.log',\n    level=logging.INFO,\n    format='%(asctime)s %(levelname)s %(message)s')\nlogger = logging.getLogger(__name__)\n\n###############################################################################################################################\n## Parse arguments\n#\n\nparser = U.add_common_args()\nparser.add_argument(\"-e\", \"--embdim\", dest=\"emb_dim\", type=int, metavar='', default=100,\n                    help=\"Embeddings dimension (default=100)\")\nparser.add_argument(\"-as\", \"--aspect-size\", dest=\"aspect_size\", type=int, metavar='', default=15,\n                    help=\"The number of aspects specified by users (default=14)\")\nparser.add_argument(\"--emb-name\",  type=str,\n                    help=\"The name to the word embeddings file\", default=\"w2v_64k_unigram_100d.model\")\nparser.add_argument(\"--epochs\", dest=\"epochs\", type=int, metavar='', default=200,\n                    help=\"Number of epochs (default=15)\")\nparser.add_argument(\"-n\", \"--neg-size\", dest=\"neg_size\", type=int, metavar='', default=20,\n                    help=\"Number of negative instances (default=20)\")\nparser.add_argument(\"--seed\", dest=\"seed\", type=int, metavar='', default=1234,\n                    help=\"Random seed (default=1234)\")\nparser.add_argument(\"-a\", \"--algorithm\", dest=\"algorithm\", type=str, metavar='', default='adam',\n                    help=\"Optimization algorithm (rmsprop|sgd|adagrad|adadelta|adam|adamax) (default=adam)\")\nparser.add_argument(\"--ortho-reg\", dest=\"ortho_reg\", type=float, metavar='', default=0.1,\n                    help=\"The weight of orthogonal regularization (default=0.1)\")\nargs = parser.parse_args()\n\nout_dir = args.out_dir_path + '/' + args.domain\nU.mkdir_p(out_dir)\nU.print_args(args)\n\nassert args.algorithm in {'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'}\n# assert args.domain in {'restaurant', 'beer'}\n\nif args.seed > 0:\n    np.random.seed(args.seed)\n\n# ###############################################################################################################################\n# ## Prepare data\n# #\n\nfrom keras.preprocessing import sequence\nimport reader as dataset\n\nvocab, train_x, test_x, overall_maxlen = dataset.get_data(args.domain, vocab_size=args.vocab_size, maxlen=args.maxlen)\ntrain_x = sequence.pad_sequences(train_x, maxlen=overall_maxlen)\ntest_x = sequence.pad_sequences(test_x, maxlen=overall_maxlen)\n\n# train_x = train_x[0:30000]\nprint('Number of training examples: ', len(train_x))\nprint('Length of vocab: ', len(vocab))\n\n\ndef sentence_batch_generator(data, batch_size):\n    n_batch = len(data) // batch_size\n    batch_count = 0\n    np.random.shuffle(data)\n\n    while True:\n        if batch_count == n_batch:\n            np.random.shuffle(data)\n            batch_count = 0\n\n        batch = data[batch_count * batch_size: (batch_count + 1) * batch_size]\n        batch_count += 1\n        yield batch\n\n\ndef negative_batch_generator(data, batch_size, neg_size):\n    data_len = data.shape[0]\n    dim = data.shape[1]\n\n    while True:\n        indices = np.random.choice(data_len, batch_size * neg_size)\n        samples = data[indices].reshape(batch_size, neg_size, dim)\n        yield 
samples\n\n\n###############################################################################################################################\n## Optimizaer algorithm\n#\n\nfrom optimizers import get_optimizer\n\noptimizer = get_optimizer(args)\n\n###############################################################################################################################\n## Building model\n\nfrom model import create_model\nfrom gensim.models import KeyedVectors\nimport keras.backend as K\n\nlogger.info('  Building model')\nmodel = create_model(args, overall_maxlen, vocab)\n# freeze the word embedding layer\nmodel.get_layer('word_emb').trainable = False\nmodel.compile(optimizer=optimizer, loss=U.max_margin_loss, metrics=[U.max_margin_loss])\n\nemb_model = KeyedVectors.load(os.path.join(\".\", \"preprocessed_data\", args.domain, args.emb_name))\n###############################################################################################################################\n## Training\n#\nfrom tqdm import tqdm\n\nlogger.info(\"-\"*80)\n\nvocab_inv = {}\nfor w, ind in vocab.items():\n    vocab_inv[ind] = w\n\nsen_gen = sentence_batch_generator(train_x, args.batch_size)\nneg_gen = negative_batch_generator(train_x, args.batch_size, args.neg_size)\nbatches_per_epoch = len(train_x) // args.batch_size\n\ne_aspect = ['음질', '만족감', '디자인', '배터리', '블루투스', '착용']\n\nmin_loss = float('inf')\nfor ii in range(args.epochs):\n    t0 = time()\n    loss, max_margin_loss = 0., 0.\n\n    for b in tqdm(range(batches_per_epoch)):\n        sen_input = next(sen_gen)\n        neg_input = next(neg_gen)\n\n        batch_loss, batch_max_margin_loss = model.train_on_batch([sen_input, neg_input],\n                                                                 np.ones((args.batch_size, 1)))\n        loss += batch_loss / batches_per_epoch\n        max_margin_loss += batch_max_margin_loss / batches_per_epoch\n\n    tr_time = time() - t0\n\n    if loss < min_loss:\n        min_loss = loss\n        word_emb = K.get_value(model.get_layer('word_emb').embeddings)\n        aspect_emb = K.get_value(model.get_layer('aspect_emb').W)\n        word_emb = word_emb / np.linalg.norm(word_emb, axis=-1, keepdims=True)\n        aspect_emb = aspect_emb / np.linalg.norm(aspect_emb, axis=-1, keepdims=True)\n        aspect_file = open(out_dir + '/aspect.log', 'wt', encoding='utf-8')\n        model.save(out_dir + '/model_param')\n\n        for ind in range(len(aspect_emb)):\n            desc = aspect_emb[ind]\n            sims = word_emb.dot(desc.T)\n            ordered_words = np.argsort(sims)[::-1]\n            desc_list = [vocab_inv[w] for w in ordered_words[:100]]\n\n            aspect_sims = {}\n            for label in e_aspect:\n                aspect_sims[label] = np.array([emb_model.wv.similarity(v_word, w) for w in desc_list]).mean()\n            \n            if max(aspect_sims.values()) > 0.3:\n                aspect_label = max(aspect_sims.keys(), key=lambda k:aspect_sims[k])\n            else:\n                aspect_label = None\n            print('Aspect {}: {}'.format(ind, aspect_label))\n            print(desc_list)\n            aspect_file.write('Aspect {}: {}'.format(ind, aspect_label))\n            aspect_file.write(' '.join(desc_list) + '\\n\\n')\n\n    logger.info('Epoch %d, train: %is' % (ii, tr_time))\n    logger.info(\n        'Total loss: %.4f, max_margin_loss: %.4f, ortho_reg: %.4f' % (loss, max_margin_loss, loss - max_margin_loss))\n", "sub_path": "ABAE/code/train.py", "file_name": "train.py", "file_ext": "py", 
"file_size_in_byte": 6694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.add_common_args", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.mkdir_p", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.print_args", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "reader.get_data", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 53, "usage_type": "name"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.random.shuffle", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 81, "usage_type": "attribute"}, {"api_name": "optimizers.get_optimizer", "line_number": 92, "usage_type": "call"}, {"api_name": "model.create_model", "line_number": 102, "usage_type": "call"}, {"api_name": "model.get_layer", "line_number": 104, "usage_type": "call"}, {"api_name": "model.compile", "line_number": 105, "usage_type": "call"}, {"api_name": "utils.max_margin_loss", "line_number": 105, "usage_type": "attribute"}, {"api_name": "gensim.models.KeyedVectors.load", "line_number": 107, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 107, "usage_type": "name"}, {"api_name": "time.time", "line_number": 127, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 130, "usage_type": "call"}, {"api_name": "model.train_on_batch", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 135, "usage_type": "call"}, {"api_name": "time.time", "line_number": 139, "usage_type": "call"}, {"api_name": "keras.backend.get_value", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 143, "usage_type": "name"}, {"api_name": "model.get_layer", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.backend.get_value", "line_number": 144, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 144, "usage_type": "name"}, {"api_name": "model.get_layer", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 145, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 146, "usage_type": "attribute"}, {"api_name": "model.save", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, 
"usage_type": "call"}]}
+{"seq_id": "548779116", "text": "\n# coding: utf-8\n\n# In[3]:\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing, cross_validation, neighbors, metrics, linear_model, ensemble, grid_search, cluster\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\nprint(df.head())\n#print(test.head())\n\n\n# In[4]:\n\nimport statistics as s\n#print(s.mean(df.Age))\n#print(df.describe())\nm = df.describe().transpose()['mean']\nthq = df.describe().transpose()['75%']\n#print(m.Age)\n\ndf.Age = df.Age.fillna(value=m.Age)\ndf['Child'] = np.where(df['Age'] < 18, 1, 0)\n\nprint(df.head())\n\nmtest = test.describe().transpose()['mean']\ntest.Age = test.Age.fillna(value=mtest.Age)\ntest['Child'] = np.where(test['Age'] < 18, 1, 0)\n\n\n# In[5]:\n\n#Feature Engineering Training\n\nimport re\ntitle = []\nfor i in range(len(df.Name)):\n    title.append(re.split(', |\\. ',df.Name[i])[1])\n\nprint(title)\n\ndf['Title'] = title\ndf.Title.value_counts()\n\nfor i in range(len(df.Title)):\n    if df.Title[i] == 'Mme':\n        df.Title[i] = 'Mlle'\n    elif df.Title[i] in ('Capt','Don','Major'):\n        df.Title[i] = 'Sir'\n    elif df.Title[i] in ('Dona','the Countess','Jonkheer'):\n        df.Title[i] = 'Lady'\ndf.Title.value_counts()\n\ndf['FamilySize'] = df.SibSp + df.Parch + 1\nprint(df.FamilySize)\n\nsurname = []\nfor i in range(len(df.Name)):\n    surname.append(re.split(', |\\. ',df.Name[i])[0])\n\n#print(surname)\ndf['Surname'] = surname\ndf.Surname\n\nfamilyid = []\nfor i in range(len(df)):\n    familyid.append(str(df.FamilySize[i])+df.Surname[i])\n\ndf['FamilyID'] = familyid\ndf.FamilyID\n\n\n# In[6]:\n\n#Feature Engineering Test\n\ntesttitle = []\nfor i in range(len(test)):\n    testtitle.append(re.split(', |\\. ',test.Name[i])[1])\n\ntest['Title'] = testtitle\ntest.Title.value_counts()\n\nfor i in range(len(test.Title)):\n    if test.Title[i] == 'Mme':\n        test.Title[i] = 'Mlle'\n    elif test.Title[i] in ('Capt','Don','Major'):\n        test.Title[i] = 'Sir'\n    elif test.Title[i] in ('Dona','the Countess','Jonkheer'):\n        test.Title[i] = 'Lady'\n\ntest['FamilySize'] = test.SibSp + test.Parch + 1\n\ntestsurname = []\nfor i in range(len(test)):\n    testsurname.append(re.split(', |\\. 
',test.Name[i])[0])\n\ntest['Surname'] = testsurname\n\ntestfamilyid = []\nfor i in range(len(test)):\n    testfamilyid.append(str(test.FamilySize[i])+test.Surname[i])\n\ntest['FamilyID'] = testfamilyid\n\n\n# In[7]:\n\nfor i in range(len(df)):\n    if pd.isnull(df.Embarked[i]):\n        df.Embarked[i] = 'U'\n\ndf.Sex = preprocessing.LabelEncoder().fit_transform(df.Sex) #Encode training data\ndf.Title = preprocessing.LabelEncoder().fit_transform(df.Title)\ndf.Embarked = preprocessing.LabelEncoder().fit_transform(df.Embarked)\n\ntest.Sex = preprocessing.LabelEncoder().fit_transform(test.Sex)\ntest.Title = preprocessing.LabelEncoder().fit_transform(test.Title)\ntest.Embarked = preprocessing.LabelEncoder().fit_transform(test.Embarked)\n\nX = np.array(df[['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked','Title','FamilySize']])\nprint(X[0:5])\nprint(X.shape)\n\ny = np.array(df['Survived'])\n\nprint(y[0:5])\nprint(y.shape)\n\n\n# In[9]:\n\nkmn = cluster.KMeans(n_clusters=2)\nkmn.fit(X)\n\nTP = 0\nfor i in range(len(y)):\n    if kmn.predict(X)[i] == y[i]:\n        TP+=1\naccuracy = TP/len(y)\nprint(accuracy)\n\n\n# In[ ]:\n\n#knn = neighbors.KNeighborsClassifier(n_neighbors=7,n_jobs=-1)\n#svm = svm.SVC()\n#logit = linear_model.LogisticRegression(n_jobs=-1)\nclf = ensemble.RandomForestClassifier(n_jobs=-1)\n\nk_range = list(range(1,50))\nparam_dist = dict(n_estimators=k_range)\nrand = grid_search.RandomizedSearchCV(clf, param_dist, cv=10, scoring='accuracy', n_iter=10, random_state=5, n_jobs=-1)\nrand.fit(X, y)\nrand.grid_scores_\nprint(rand.best_score_, rand.best_params_)\n\n\n# In[ ]:\n\nrf = ensemble.RandomForestClassifier(n_jobs=-1)\n\ndef classifier(alg, features):\n    X_train, X_test, y_train, y_test = cross_validation.train_test_split(features, y, test_size=0.1)\n    alg.fit(X_train, y_train)\n    alg_train_pp = alg.predict_proba(X_test)\n    print(alg.score(X_test, y_test))\n    \n    #ROC Curve\n    fpr, tpr, thresholds = metrics.roc_curve(y_test, alg_train_pp[:,1])\n    roc_auc = metrics.auc(fpr, tpr)\n    get_ipython().magic('matplotlib inline')\n    fig = plt.plot(fpr, tpr, label='AUC = %0.3f' % roc_auc)\n    plt.plot([0, 1], [0, 1], 'k--')  # random predictions curve\n    plt.xlim([0, 1])\n    plt.ylim([0, 1])\n    plt.xlabel('False Positive Rate or (1 - Specificity)')\n    plt.ylabel('True Positive Rate or (Sensitivity)')\n    plt.title('Receiver Operating Characteristic')\n    plt.legend(loc=\"lower right\")\n    plt.show()\n    return fig\n\n\n# In[ ]:\n\nclassifier(rf,X)\n\n", "sub_path": "titanic-project.py", "file_name": "titanic-project.py", "file_ext": "py", "file_size_in_byte": 4592, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 33, "usage_type": "call"}, {"api_name": "re.split", "line_number": 43, "usage_type": "call"}, {"api_name": "re.split", "line_number": 64, "usage_type": "call"}, {"api_name": "re.split", "line_number": 84, "usage_type": "call"}, {"api_name": "re.split", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 115, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 118, 
"usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 119, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 120, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 122, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 122, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 123, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 124, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 138, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 138, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 154, "usage_type": "name"}, {"api_name": "sklearn.grid_search.RandomizedSearchCV", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.grid_search", "line_number": 158, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 166, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 166, "usage_type": "name"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 169, "usage_type": "call"}, {"api_name": "sklearn.cross_validation", "line_number": 169, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 175, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 175, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 176, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}]}
+{"seq_id": "490717284", "text": "import csv\nimport re\nfrom enum import Enum, auto\n\nSTANDARD_HEADER = [['', '', '', 'Summary Data', '', '', '', 'Taxonomy', '', '', '', '', '', '', '', '', 'Description',\n                    '', '', '', '', '', '', '', '', 'Collection Location Data', '', '', '', '', '', 'Collector',\n                    '', '', 'Collection', 'Label Data', 'Storage Location', '', '', '', '',\n                    'Additional Notes (if additional notes become consistent, consider a new separate column'],\n                   ['', '', '', 'UI Number', 'Other Number', 'Other number type', 'Type status', 'Label Family',\n                    'Label Genus', 'Label species', 'Current Family', 'Current Genus', 'Current species', 'Subspecies',\n                    'Common Name', '', 'Variety', 'Preservation', 'Number of specimens', 'Description', 'Sex',\n                    'Stage/Phase', '', 'Condition Rating (Good, Fair, Poor, Unacceptable)',\n                    'Condition details (eg wing fallen off)', 'Level 1 eg.Country', 'Level 2 - eg.County',\n                    'Level 3 - eg.Town/City/Village', 'Level 4 (eg.Nearest named place)', 'Date (DD/MM/YYYY)',\n                    'Bred or not (B if bred/ blank if caught on wing)', 'Surname', 'First name', 'Middle Names',\n                    'Name', 'Verbatum label data', 'Level 1', 'Level 2', 'Level 3', 'Level 4', 'Level 5 +6', ''],\n                   ['ColCollection', 'ColTypeOfItem', 'ColObjectStatus', 'ColObjectNumber', 'ColOtherNumbers_tab',\n                    'ColOtherNumbersType_tab', \"IdeTypeStatus_tab(+ group='1')\",\n                    \"TaxTaxonomyRef_tab(+ group='2').ClaFamily\", \"TaxTaxonomyRef_tab(+ group='2').ClaGenus\",\n                    \"TaxTaxonomyRef_tab(+ group='2').ClaSpecies\", \"TaxTaxonomyRef_tab(+ group='1').ClaFamily\",\n                    \"TaxTaxonomyRef_tab(+ group='1').ClaGenus\", \"TaxTaxonomyRef_tab(+ group='1').ClaSpecies\",\n                    \"TaxTaxonomyRef_tab(+ group='1').ClaSubspecies\", \"TaxTaxonomyRef_tab(+ group='1').ComName_tab\",\n                    \"IdePreferredName_tab(+ group='1')\", 'SpeVariety', 'SpePreservation', 'SpeNumberSpecimens',\n                    'ColPhysicalDescription', 'SpeSex', 'SpeStage', 'ConDateChecked', 'ConConditionStatus',\n                    'ConConditionDetails', 'SitSiteRef_tab.LocCountry_tab', 'SitSiteRef_tab.LocDistrictCountyShire_tab',\n                    'SitSiteRef_tab.LocTownship_tab', 'SitSiteRef_tab.LocNearestNamedPlace_tab',\n                    'ColCollectionDatesText_tab', 'ColCollectionMethod', 'ColCollectorsRef_tab.NamLast',\n                    'ColCollectorsRef_tab.NamFirst', 'ColCollectorsRef_tab.NamMiddle',\n                    'ColSumAssociatedCollections_tab', 'EntLabVerbatimLabelData0', 'LocCurrentLocationRef.LocLevel1',\n                    'LocCurrentLocationRef.LocLevel2', 'LocCurrentLocationRef.LocLevel3',\n                    'LocCurrentLocationRef.LocLevel4', 'LocMovementNotes', 'NotNotes']]\n\n\nclass ResolutionType(Enum):\n    no_clash = auto()\n    just_first = auto()\n    just_last = auto()\n    all = auto()\n\n\ndef add_to_indices(word_index, col_index, indices, resolution_type, num_words):\n    if word_index >= num_words or word_index < 0:\n        raise Exception('index in optional parameter out of range')\n    elif word_index in indices:\n        if resolution_type == ResolutionType.no_clash:\n            raise Exception('repeated index in optional parameter,'\n                            ' consider changing 
resolution type')\n        elif resolution_type == ResolutionType.just_last:\n            # overwrite the last one\n            indices[word_index] = [col_index]\n        elif resolution_type == ResolutionType.all:\n            indices[word_index].append(col_index)\n    else:\n        indices[word_index] = [col_index]\n\n# ----------------------------------------------------------------------------------------------------------------\n#   split_col :         no return value         mutates table\n#   table:              list list String        expected to contain the field headers in the first row\n#   field_index :       int                     index of the column to split on (0-based, into each row)\n#   new_cols:           list String             specifies the field names for the new fields (order matters)\n#   which_words:        list String             (optional) specifies which word indices the new fields correspond to\n#   separator:          String                  (optional) specifies the string used to split the string in the column\n#   resolution_type:    Enum (ResolutionType)   (optional) specifies how word index clashes are resolved\n#   joiner:             String                  (optional) specifies the string used to join words together\n\ndef split_col(table, field_index, new_cols, which_words=None, separator=' ', resolution_type=ResolutionType.no_clash,\n              joiner=' '):\n    for row_index, row in enumerate(table):\n        if row_index != 0:\n            words = row[field_index].split(separator)\n            if len(words) == len(new_cols) and which_words is None:\n                row += words\n            elif which_words is not None and len(which_words) == len(new_cols):\n                wildcard_found = False\n                wildcard_index = -1\n                indices = dict()\n                for new_col_index, word_index in enumerate(which_words):\n                    if word_index == '*':\n                        if wildcard_found:\n                            raise Exception('multiple wildcards passed in optional parameter')\n                        else:\n                            wildcard_found = True\n                            wildcard_index = new_col_index\n                    elif re.match('-?\\\\d+ *: *-?\\\\d+', word_index):\n                        # match a slice\n                        start = int(re.search('(-?\\\\d+) *:', word_index).group(1))\n                        end = int(re.search(': *(-?\\\\d*)', word_index).group(1))\n                        if start < 0:\n                            start = len(words) + start\n                        if end < 0:\n                            end = len(words) + end\n                        if start >= len(words) or end >= len(words) or start < 0 or end < 0:\n                            raise Exception('index in optional parameter out of range')\n                        for i in range(start, end + 1):\n                            add_to_indices(i, new_col_index, indices, resolution_type, len(words))\n                    elif re.match('\\\\[-?\\\\d+(, ?-?\\\\d)*\\\\]', word_index):\n                        # match a list of indices\n                        word_indices = re.findall('-?\\\\d+', word_index)\n                        for w in word_indices:\n                            index = int(w)\n                            if index < 0:\n                                index = len(words) + index\n                            add_to_indices(index, new_col_index, indices, 
resolution_type, len(words))\n                    elif re.match('\\\\d+', word_index):\n                        add_to_indices(int(word_index), new_col_index, indices, resolution_type, len(words))\n                    elif re.match('-\\\\d+', word_index):\n                        index = len(words) + int(word_index)\n                        add_to_indices(index, new_col_index, indices, resolution_type, len(words))\n                    else:\n                        raise Exception('Failed to match optional parameter')\n\n                row_addition = [[] for _ in range(len(new_cols))]\n                for word_index, word in enumerate(words):\n                    if word_index in indices:\n                        for col_index in indices[word_index]:\n                            row_addition[col_index].append(word)\n                    elif wildcard_found:\n                        row_addition[wildcard_index].append(word)\n                    # else we discard that word\n\n                row_string = [joiner.join(col) for col in row_addition]\n                row += row_string\n\n            else:\n                raise Exception(f'number of words and columns provided not equal at index {row_index}')\n        else:\n            row += new_cols\n\n\ndef matrix_to_csv(table, path):\n    with open(path, mode='w', newline='') as outfile:\n        out = csv.writer(outfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n        out.writerows(table)\n\n\ndef matrix_to_standard(table, field_map, joiner=' ', header=STANDARD_HEADER):\n    # the field map will map the standard field headers to table's field headers\n    result = [[\"\" for _ in range(len(header[0]))] for _ in range(len(table) - 1)]\n    for i in range(len(header)):\n        for field in header[i]:\n            if field in field_map:\n                std_field_index = header[1].index(field)\n                for table_field_name in field_map[field]:\n                    table_field_index = table[0].index(table_field_name)\n                    for row_num, row in enumerate(result):\n                        if row[std_field_index] == '':\n                            row[std_field_index] = table[row_num + 1][table_field_index]\n                        else:\n                            row[std_field_index] += joiner + table[row_num + 1][table_field_index]\n    return result\n\n# ----------------------------------------------------------------------------------------------------------------\n#   matrix_to_standard_csv:     No return value             Outputs to a file specified by path\n#   table:                      list list String            expected to contain field headers in first row\n#   path:                       String                      specifies the output path for the generated CSV\n#   field_map:                  dict                        maps a standard field header to a list of our field headers\n#   joiner:                     String                      (optional) specifies string used to join words together\n#   header:                     list list String            (optional) may consist of multiple rows\n\ndef matrix_to_standard_csv(table, path, field_map, joiner=' ', header=STANDARD_HEADER):\n    with open(path, mode='w', newline='') as outfile:\n        out = csv.writer(outfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n        out.writerows(header)\n        out.writerows(matrix_to_standard(table, field_map, joiner, header))\n\n\ndef read_csv(path):\n    with open(path, mode='r', newline='') as infile:\n        reader = 
csv.reader(infile, delimiter=',', quotechar='\"')\n        table = []\n        for row in reader:\n            table.append(row)\n        return table\n\n", "sub_path": "scratchSpaces/jamesScratchSpace/matrix_to_csv.py", "file_name": "matrix_to_csv.py", "file_ext": "py", "file_size_in_byte": 10562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "enum.Enum", "line_number": 34, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 35, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 36, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 37, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 38, "usage_type": "call"}, {"api_name": "re.match", "line_number": 84, "usage_type": "call"}, {"api_name": "re.search", "line_number": 86, "usage_type": "call"}, {"api_name": "re.search", "line_number": 87, "usage_type": "call"}, {"api_name": "re.match", "line_number": 96, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 98, "usage_type": "call"}, {"api_name": "re.match", "line_number": 104, "usage_type": "call"}, {"api_name": "re.match", "line_number": 106, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 132, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 132, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 162, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 162, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 169, "usage_type": "call"}]}
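Since `split_col` mutates `table` in place and all the routing lives in `which_words` (plain indices, negatives, `'a:b'` slices, `'[i, j]'` lists, and a single `'*'` wildcard that absorbs the leftover words), a small usage example helps; the data below is invented and assumes the functions above are in scope:

```python
table = [
    ['label'],                    # header row
    ['1903.4 Carabus auratus'],   # data rows: "<number> <genus> <species>"
    ['88 Pieris brassicae'],
]

# First word goes to 'number', everything else to 'name' via the wildcard.
split_col(table, 0, ['number', 'name'], which_words=['0', '*'])

print(table[0])  # ['label', 'number', 'name']
print(table[1])  # ['1903.4 Carabus auratus', '1903.4', 'Carabus auratus']
```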
+{"seq_id": "398120860", "text": "#! /usr/bin/env python3\n\nimport os\nimport io\nfrom flask import Flask, flash, request, redirect, url_for, render_template, send_from_directory\nfrom werkzeug.utils import secure_filename\nfrom nlp_funcs import *\nimport pandas as pd\nimport re\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import coo_matrix\nfrom nltk.tokenize import word_tokenize\n\nUPLOAD_FOLDER = \"../data/uploaded_data/\"\nALLOWED_EXTENSIONS = ['pdf']\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n\n@app.route('/home', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        if request.form['submit-button'] == \"upload\":\n\n\n            # first, delete content of upload folder\n            for file in os.listdir(UPLOAD_FOLDER):\n                os.unlink(UPLOAD_FOLDER+file)\n\n            # check if the post request has the file part\n            if 'file' not in request.files:\n                flash('No file part')\n                return redirect(request.url)\n\n            file = request.files['file']\n            # if user does not select file, browser also\n            # submit an empty part without filename\n            if file.filename == '':\n                flash('No selected file')\n                return redirect(request.url)\n\n            if file and allowed_file(file.filename):\n                filename = secure_filename(file.filename)\n                path_to_file = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n\n                file.save(path_to_file)\n                for file in os.listdir(upload_path):\n\n                    #Check if file is a pdf\n                    if file.endswith(\".pdf\"):\n                        pdf_path = upload_path + file\n\n\n                return render_template('extract_info.html', **case_info)\n\n        elif request.form['submit-button'] == \"search\":\n            query = request.form.get('query')\n            new_query = sanititize_input(query)\n            num_res = request.form.get('wtf-value')\n            # handle value error exception and return error message if query not found\n            try:\n                    ind = vectorizer.get_feature_names().index(new_query)\n            except ValueError:\n                return render_template('search_fail.html', title='Home', name=query)\n\n            # find index in vec where query term occurs\n            indices = np.where(vec.col == ind)\n            a = vec.data[indices]\n            b = vec.row[indices]\n            list1, list2 = zip(*sorted(zip(a, b), reverse=True))\n            name1 = []\n            score = []\n            new_list1 = []\n            new_list2 = []\n\n            if int(num_res) == 15:\n                for i in range(len(list1)):\n                    if list1[i] > 0:\n                        new_list1.append(list1[i])\n                        new_list2.append(list2[i])\n\n                list1 = new_list1\n                list2 = new_list2\n                num_res = len(list1)\n\n            # make query term bold in retrieved documents\n            for num in range(len(list2[:int(num_res)])):\n                name1.append(re.sub(query,'{}'.format(new_query), texts[list2[num]], flags=re.I))\n                score.append(round(list1[num], 3))\n\n            return 
render_template('query_results.html', title='result', name=zip(range(1, int(num_res)+1), name1, score))\n    else:\n        return render_template('search.html', title='upload_page')\n\n@app.route('/send/<filename>')\ndef send_pdf(filename):\n    return send_from_directory(UPLOAD_FOLDER, filename)\n\n\nif __name__ == \"__main__\":\n\n    stop_words_dutch_file = open('../data/misc_files/stopwords_dutch.txt', 'r')\n    stop_words_english_file = open('../data/misc_files/stopwords_english.txt', 'r')\n    stop_words_dutch = stop_words_dutch_file.read().split('\\n')\n    stop_words_english = stop_words_english_file.read().split('\\n')\n    stop_words = stop_words_dutch + stop_words_english\n\n    texts = []\n    source_path = \"../data/text_files/clean/\"\n    for file in os.listdir(source_path):\n        with io.open(source_path + file, \"r\", encoding=\"utf-8\") as infile:\n            texts.append(infile.readline())\n    # get dataset and retrieved grams\n    vectorizer = TfidfVectorizer(ngram_range=(1, 3), stop_words=stop_words)\n\n    vec = coo_matrix(vectorizer.fit_transform(texts))\n\n    app.run(debug=True)", "sub_path": "JADS4good/app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4475, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": 
"flask.request.form.get", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 74, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 95, "usage_type": "call"}, {"api_name": "re.I", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 104, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 117, "usage_type": "call"}, {"api_name": "io.open", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 121, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 123, "usage_type": "call"}]}
+{"seq_id": "514848671", "text": "\nfrom typing import Optional\nimport torch\nfrom torch.utils.data import DataLoader\n#from dataset import collate_fn\n\n\ndef get_data_loader(dataset: torch.utils.data.Dataset,\n                    batch_size: Optional[int] = 1,\n                    shuffle: Optional[bool] = True) \\\n        -> DataLoader:\n    \"\"\"Returns Pytorch DataLoader.\n    :param dataset: Dataset to iterate over.\n    :type dataset: torch.utils.data.Dataset\n    :param batch_size: Batch size to use.\n    :type batch_size: int\n    :param shuffle: Shall we shuffle the examples?\n    :type shuffle: bool\n    \"\"\"\n\n    return DataLoader(dataset=dataset,\n                      batch_size=batch_size,\n                      shuffle=shuffle)\n\n# EOF\n", "sub_path": "data_init.py", "file_name": "data_init.py", "file_ext": "py", "file_size_in_byte": 705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.utils", "line_number": 8, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "445991886", "text": "from citylearn import  CityLearn\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nimport numpy as np\nfrom agents.rbc import RBC\n\nclimate_zone = 5\nsim_period = (0, 8760*4-1)\nparams = {'data_path':Path(\"data/Climate_Zone_\"+str(climate_zone)), \n        'building_attributes':'building_attributes.json', \n        'weather_file':'weather_data.csv', \n        'solar_profile':'solar_generation_1kW.csv', \n        'carbon_intensity':'carbon_intensity.csv',\n        'building_ids':[\"Building_\"+str(i) for i in [1,2,3,4,5,6,7,8,9]],\n        'buildings_states_actions':'buildings_state_action_space.json', \n        'simulation_period': sim_period, \n        'cost_function': ['ramping','1-load_factor','average_daily_peak','peak_demand','net_electricity_consumption','carbon_emissions'], \n        'central_agent': False,\n        'save_memory': False,\n        'verbose': 1}\n\nenv = CityLearn(**params)\n\nobservations_spaces, actions_spaces = env.get_state_action_spaces()\n\n# Simulation without energy storage\nenv.reset()\ndone = False\nwhile not done:\n    _, rewards, done, _ = env.step([[0 for _ in range(len(actions_spaces[i].sample()))] for i in range(9)])\ncost_no_storage, cost_no_storage_last_yr = env.cost()\n\nenv.cost()\n\ninterval = range(sim_period[0], sim_period[1])\nplt.figure(figsize=(12,8))\nplt.plot(env.net_electric_consumption[interval]+env.electric_generation[interval]-env.electric_consumption_cooling_storage[interval]-env.electric_consumption_dhw_storage[interval])\nplt.plot(env.net_electric_consumption[interval]-env.electric_consumption_cooling_storage[interval]-env.electric_consumption_dhw_storage[interval])\nplt.legend(['Electricity demand without storage or generation (kW)', 'Electricity demand with PV generation and without storage(kW)'])\nplt.show()", "sub_path": "tEnvPlot.py", "file_name": "tEnvPlot.py", "file_ext": "py", "file_size_in_byte": 1765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pathlib.Path", "line_number": 9, "usage_type": "call"}, {"api_name": "citylearn.CityLearn", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}]}
+{"seq_id": "398795442", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('appreality', '0002_auto_20150702_0914'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Servicio',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('imagen', models.ImageField(upload_to=b'img/servicios', verbose_name=b'Imagen del Servicio')),\n                ('servicio', models.CharField(max_length=50, verbose_name=b'Descripcion del Servicio')),\n                ('descripcion', models.TextField(verbose_name=b'Descripci\\xc3\\xb3n del Servicio')),\n            ],\n            options={\n                'verbose_name_plural': 'Servicios',\n            },\n        ),\n        migrations.AlterModelOptions(\n            name='cargo',\n            options={'verbose_name_plural': 'Cargos'},\n        ),\n        migrations.AlterModelOptions(\n            name='cliente',\n            options={'ordering': ['-id'], 'verbose_name_plural': 'Clientes'},\n        ),\n    ]\n", "sub_path": "appreality/migrations/0003_auto_20150704_1008.py", "file_name": "0003_auto_20150704_1008.py", "file_ext": "py", "file_size_in_byte": 1166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 30, "usage_type": "name"}]}
+{"seq_id": "22525737", "text": "\"\"\"\n* Purpose:This file contains all the custom created decorators which are\n          required in project\n\n* @author: Nikhil Lad\n* @version: 3.7\n* @since: 11-3-2019\n\n\"\"\"\n\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import QueryDict, HttpResponse\nfrom self import self\nfrom .redis_services import redis_info\nimport jwt\n\n\ndef custom_login_required(function=None,login_url =''):\n    try:\n        def is_login(request):\n            token = redis_info.get_token(self,'token')  # gets the token from redis cache\n            if token:\n                token = token.decode(encoding='utf-8')  # decodes the token ( from Bytes to str )\n                decoded_token = jwt.decode(token, 'secret_key',algorithms=['HS256'])  # decodes JWT token and gets the values Username etc\n                user = User.objects.get(username=decoded_token['username']).pk  # gets the user from username\n            else:\n                return None\n            return User.objects.filter(pk=user).exists()     # if user is present in DB.\n        actual_decorator = user_passes_test(is_login)           # user_passes_test to check if some test passes or not\n        if function:\n            return actual_decorator(function)\n        else:\n            return HttpResponse(\"Not valid user\")\n    except (ObjectDoesNotExist, AttributeError, Exception) as e:\n       return HttpResponse(\"Dose not exist\")\n\n\ndef custom_Log(function):\n    try:\n        def wrap(request,*args, **kwargs):\n            token = request.META.get('HTTP_AUTHORIZATION')\n            if token:\n                decoded_token = jwt.decode(token, 'secret_key', algorithms=['HS256'])  # decodes JWT token and gets the values Username etc\n                user=User.objects.get(username=decoded_token['username'])\n                if user:\n                    request.user=user\n                else:\n                    raise ObjectDoesNotExist\n            else:\n                return None\n            if function:\n                return function(request,*args,**kwargs)\n            else:\n                return HttpResponse(\"Not valid user\")\n        return wrap\n    except (User.DoesNotExist,ObjectDoesNotExist, AttributeError, Exception) as e:\n       return \"Dose not exist\"\n\n    except (ObjectDoesNotExist,AttributeError ,Exception) as e:\n        print('exception occurs',e)\n\n        return redirect(reverse('login_v'))\n", "sub_path": "restapi_demo/apidemo/custom_decorators.py", "file_name": "custom_decorators.py", "file_ext": "py", "file_size_in_byte": 2486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "redis_services.redis_info.get_token", "line_number": 23, "usage_type": "call"}, {"api_name": "self.self", "line_number": 23, "usage_type": "argument"}, {"api_name": "redis_services.redis_info", "line_number": 23, "usage_type": "name"}, {"api_name": "jwt.decode", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 30, 
"usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 30, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.user_passes_test", "line_number": 31, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 36, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 46, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 50, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 58, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 58, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 61, "usage_type": "name"}]}
+{"seq_id": "566469401", "text": "import random\nfrom functools import reduce\nfrom operator import iadd\nimport queue\n\n\nclass Tangle:\n    def __init__(self, genesis_site, walker_num,_seed=None):\n        self.genesis_site = genesis_site\n        self.walker_num = walker_num\n        random.seed(_seed)\n        self.id_node_map={self.genesis_site.id:genesis_site}\n\n    @staticmethod\n    def init_with_fork(seed=None):\n        genesis_site=Tangle_Site.get_genesis_site()\n        fork_tangle = Tangle(genesis_site,\n                             2,seed)\n        site2 = Tangle_Site([1], [], None, 1, [genesis_site],0,2)\n        site3 = Tangle_Site([1], [], None, 0,[genesis_site],0, 3)\n        fork_tangle.insert_site(site2)\n        fork_tangle.insert_site(site3)\n        return fork_tangle\n    \n    def random_walk(self):\n        walker_list = []\n        walker_start_point_list = []\n        selected_tip = []\n        for i in range(self.walker_num):\n            walker_start_point_list.append(self.genesis_site)\n            walker_list.append(self.genesis_site)\n        while(True):\n            walking_order = [i for i in range(len(walker_list))]\n            random.shuffle(walking_order)\n            to_be_deleted_walker = []\n            for i in walking_order:\n                if not walker_list[i].children_list:\n                    if not selected_tip:\n                        selected_tip.append(walker_list[i])\n                        to_be_deleted_walker.append(walker_list[i])\n                        continue\n                    else:\n                        if selected_tip[0].vote != walker_list[i].vote:\n                            del selected_tip[0]\n                            walker_list.append(self.genesis_site)\n                            walker_list[i] = self.genesis_site\n                            continue\n                        else:\n                            selected_tip.append(walker_list[i])\n                            return selected_tip\n\n                transition_probability_list = self.calculate_transition_probability(\n                    walker_start_point_list[i], walker_list[i])\n                cumulative_probability_list = list(reduce(lambda result, x: iadd(\n                    result, [result[-1] + x]), transition_probability_list, [0])[1:])\n                sum = reduce(lambda a, b: a+b, transition_probability_list)\n                random_point = random.randint(1, sum)\n                for j in range(len(cumulative_probability_list)):\n                    if random_point <= cumulative_probability_list[j]:\n                        walker_list[i] = walker_list[i].children_list[j]\n                        if not walker_list[i].children_list:\n                            if not selected_tip:\n                                selected_tip.append(walker_list[i])\n                                to_be_deleted_walker.append(walker_list[i])\n                                break\n                            else:\n                                if selected_tip[0].vote != walker_list[i].vote:\n                                    del selected_tip[0]\n                                    walker_list.append(self.genesis_site)\n                                    break\n                                else:\n                                    selected_tip.append(walker_list[i])\n                                    return selected_tip\n                        break\n\n            for walker in to_be_deleted_walker:\n                
walker_list.remove(walker)\n\n    def calculate_transition_probability(self, start_point, walker):\n        cumulative_weight_list = []\n        for child in walker.children_list:\n            cumulative_weight_list.append(child.calculate_cumulative_weight())\n        return cumulative_weight_list\n\n    def check_site_present(self, site):\n        return self.genesis_site.check_site_present(site)\n\n    def insert_site(self, site):\n        self.id_node_map[site.id]=site\n        for father_id in site.father_id_list:\n            father_site = self.id_node_map[father_id]\n            if not father_site:\n                raise RuntimeError\n            father_site.children_list.append(site)\n            site.father_list.append(father_site)\n        site.update_weight()\n\n    def check_identical(self, another_tangle):\n        self.simple_print()\n        return self.genesis_site.check_identical_site(another_tangle.genesis_site)\n\n    def simple_print(self):\n        q = queue.Queue()\n        q.put(self.genesis_site)\n        visited = {self.genesis_site}\n        layer_dict = {0: [self.genesis_site]}\n        depth_dict = {self.genesis_site.id: 0}\n        while(not q.empty()):\n            current_site = q.get()\n            if not current_site.children_list:\n                continue\n            for child in current_site.children_list:\n                if not child in visited:\n                    visited.add(child)\n                    q.put(child)\n                    if (depth_dict[current_site.id]+1) not in layer_dict.keys():\n                        layer_dict[depth_dict[current_site.id]+1] = []\n                    depth_dict[child.id] = depth_dict[current_site.id]+1\n                    layer_dict[depth_dict[current_site.id]+1].append(child)\n        ratio_dict = {}\n        for k, v in layer_dict.items():\n            count_list = [0, 0]\n            for i in range(len(v)):\n                if v[i].vote != 0 and v[i].vote != 1:\n                    continue\n                count_list[v[i].vote] = count_list[v[i].vote]+1\n            sum = count_list[0]+count_list[1]\n            if sum != 0:\n                count_list[0] = count_list[0]*1.0/sum\n                count_list[1] = count_list[1]*1.0/sum\n            for i in range(len(v)):\n                v[i] = v[i].id\n            ratio_dict[k] = count_list\n        print(layer_dict)\n        print(ratio_dict)\n\n\nclass Tangle_Site:\n    current_id = 4\n\n    def __init__(self, father_id_list, children_list, miner, vote,father_list,weight, id=None):\n        if id == None:\n            self.id = Tangle_Site.current_id\n            Tangle_Site.current_id = Tangle_Site.current_id+1\n            self.father_id_list = father_id_list\n            self.father_list=father_list\n            self.children_list = children_list\n            self.miner = miner\n            self.vote = vote\n            self.weight=weight\n        else:\n            self.id = id\n            self.father_id_list = father_id_list\n            self.father_list = father_list\n            self.children_list = children_list\n            self.miner = miner\n            self.vote = vote\n            self.weight = weight\n    \n    @staticmethod\n    def get_genesis_site():\n        genesis_site = Tangle_Site([], [], None, None,[],0, 1)\n        return genesis_site\n\n    def update_weight(self):\n        visited=[]\n        self.weight=0\n        self.update_weight_helper(visited)\n    \n    # def update_weight_helper(self,visited):\n    #     if self.id in 
visited:\n    #         return\n    #     visited.append(self.id)\n    #     self.weight=self.weight+1\n    #     for father in self.father_list:\n    #         father.update_weight_helper(visited)\n\n    def update_weight_helper(self, visited):\n        # iterative BFS over the father links: every ancestor reachable from\n        # this site (including the site itself) gets its weight bumped by one\n        q = []\n        q.append(self)\n        visited = {self}  # rebound locally; the caller's list is not used\n        while q:\n            current_site = q.pop()\n            current_site.weight = current_site.weight + 1\n            for father in current_site.father_list:\n                if father not in visited:\n                    visited.add(father)\n                    q.append(father)\n\n    def calculate_cumulative_weight(self):\n        # return len(self.calculate_descendants_helper())\n        return self.weight\n\n    def calculate_descendants_helper(self):\n        raise NotImplementedError\n        # visited = []\n        # descendant_set = set()\n        # self.calculate_descendants(visited, descendant_set)\n        # return descendant_set\n\n    def calculate_descendants(self, visited, descendant_set):\n        raise NotImplementedError\n        # if self.id in visited:\n        #     return\n        # visited.append(self.id)\n        # descendant_set.add(self.id)\n        # if not self.children_list:\n        #     return\n        # for child in self.children_list:\n        #     child.calculate_descendants(visited, descendant_set)\n\n    # recursive variant kept for reference:\n    # def find_site_with_id_helper(self, id, visited, site):\n    #     if self.id in visited:\n    #         return\n    #     visited.append(self.id)\n    #     if self.id == id:\n    #         site[0] = self\n    #         return\n    #     if not self.children_list:\n    #         return\n    #     for child in self.children_list:\n    #         child.find_site_with_id_helper(id, visited, site)\n    #         if site[0]:\n    #             return\n    #     return\n\n    def find_site_with_id_helper(self, id, visited, site):\n        # iterative DFS over the children; raises if the id is absent\n        q = []\n        q.append(self)\n        visited = {self}  # rebound locally; the caller's list is not used\n        while q:\n            current_site = q.pop()\n            if current_site.id == id:\n                site[0] = current_site\n                return\n            for child in current_site.children_list:\n                if child not in visited:\n                    visited.add(child)\n                    q.append(child)\n        raise RuntimeError\n\n    def find_site_with_id(self, id):\n        visited = []\n        site = [None]\n        self.find_site_with_id_helper(id, visited, site)\n        return site[0]\n\n    def clone(self):\n        return Tangle_Site(self.father_id_list.copy(), [], self.miner, self.vote, [], 0, self.id)\n\n    def copy(self):\n        return self.clone()\n\n    def check_site_present_helper(self, site, visited, present):\n        if self.id in visited:\n            return\n        visited.append(self.id)\n        if self.id == site.id:\n            present[0] = True\n            return\n        if not self.children_list:\n            return\n        for child in self.children_list:\n            child.check_site_present_helper(site, visited, present)\n            if present[0]:\n                return\n        return\n\n    def check_site_present(self, site):\n        visited = []\n        present = [False]\n        self.check_site_present_helper(site, visited, present)\n        return present[0]\n\n    def check_identical_site(self, another_site):\n        if self.id != another_site.id:\n            return False\n        if self.father_id_list != 
another_site.father_id_list:\n            return False\n        if not self.children_list:\n            if not another_site.children_list:\n                return True\n            return False\n        mychildren_dict = {}\n        for child in self.children_list:\n            mychildren_dict[child.id] = child\n        hischildren_dict = {}\n        for child in another_site.children_list:\n            hischildren_dict[child.id] = child\n        if len(mychildren_dict) != len(hischildren_dict):\n            return False\n        for k, v in mychildren_dict.items():\n            if k not in hischildren_dict.keys():\n                return False\n            if not v.check_identical_site(hischildren_dict[k]):\n                return False\n        return True\n\n    def get_sender(self):\n        return self.miner\n", "sub_path": "src/Util/Tangle.py", "file_name": "Tangle.py", "file_ext": "py", "file_size_in_byte": 11122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "random.seed", "line_number": 11, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 34, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 54, "usage_type": "call"}, {"api_name": "operator.iadd", "line_number": 54, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 103, "usage_type": "call"}]}
+{"seq_id": "43807505", "text": "import numpy as np\nimport cv2\n\n# define a video input\ncap = cv2.VideoCapture(0)\nbackground = cv2.imread('background.png')\n\nwhile(True):\n\n\t# Capture frame-by-frame\n\tret, frame = cap.read()\n\n\t# Flip the frame horizontally\n\tframe = cv2.flip(frame, 1)\n\n\tno_background = frame - background\n\n\t# Display the resulting frame\n\tcv2.imshow('frame', frame)\n\tcv2.imshow('NO background', no_background)\n\n\t# Wait until key is pressed\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# (esc) pressed = quit\n\tif key == 27:\n\t\tbreak\n\n# Release everything if job is finished\ncap.release()\ncv2.destroyAllWindows()\n", "sub_path": "webcam/no_background.py", "file_name": "no_background.py", "file_ext": "py", "file_size_in_byte": 575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.VideoCapture", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "575993078", "text": "\"\"\"Class for authentication tests.\"\"\"\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.shortcuts import reverse\n\n\nclass AuthenticationTest(TestCase):\n    \"\"\"Test cases for authentication function.\"\"\"\n\n    def setUp(self):\n        \"\"\"Initialize the new user.\"\"\"\n        self.user = {\n            'username': 'username',\n            'password': 'password'\n        }\n        User.objects.create_user(**self.user)\n\n    def test_user_logged_in(self):\n        \"\"\"Test user login function, the user's username should display on the index page.\"\"\"\n        response = self.client.post(reverse('login'), self.user)\n        self.assertEqual(response.status_code, 302)\n        response = self.client.get(reverse('polls:index'))\n        self.assertTrue(response.context['user'].is_authenticated)\n        self.assertContains(response, f'Welcome back, {self.user[\"username\"]}')\n\n    def test_user_logged_out(self):\n        \"\"\"Test user logout function, the user's username will not be shown on the index page.\"\"\"\n        self.client.post(reverse('login'), self.user)\n        response = self.client.post(reverse('logout'))\n        self.assertEqual(response.status_code, 302)\n        response = self.client.get(reverse('polls:index'))\n        self.assertEqual(response.status_code, 200)\n        self.assertFalse(response.context['user'].is_authenticated)\n        self.assertNotContains(response, f'Welcome back, {self.user[\"username\"]}')\n", "sub_path": "polls/tests/test_auth.py", "file_name": "test_auth.py", "file_ext": "py", "file_size_in_byte": 1461, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.test.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.reverse", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "587496680", "text": "import os\nimport time\nfrom pprint import pprint\n\nimport cv2\nimport drest\n\nfrom CONSTANTS import CURRENT_IMAGE_FILE, FACE_API_KEY, FACE_BASE_URL, DEFAULT_PERSON_GROUP, CAPTURE_INTERVAL, \\\n    REST_SERVER_URL\nfrom camera.Camera import Camera\nfrom face.FaceAPIWrapper import FaceAPIWrapper\n\n\ndef add_person(person_name, image_urls, person_group=DEFAULT_PERSON_GROUP):\n    print(\"Name:\", person_name)\n    print(\"Images:\", image_urls)\n    face_api = FaceAPIWrapper(FACE_API_KEY, FACE_BASE_URL)\n    person_id = face_api.create_person(person_group=person_group, person_name=person_name)  # Save this\n    print(\"IMP! PERSON_ID for\", person_name, \"is\", person_id)\n\n    for image_url in image_urls:\n        time.sleep(5)\n        face_api.add_faces_to_person(person_group=person_group,\n                                     person_id=person_id, image_url=image_url)\n        print(\"Adding \", image_urls.index(image_url), \"of \", len(image_urls))\n\n    face_api.train_group(person_group)\n    print(\"Started Training\", person_group, \"...\")\n    return person_name, person_id\n\n\ndef initial_setup():\n    person_group = DEFAULT_PERSON_GROUP\n\n    face_api = FaceAPIWrapper(FACE_API_KEY, FACE_BASE_URL)\n    pprint(face_api.list_groups())\n\n    face_api.delete_group(person_group)\n    face_api.create_group(person_group)\n\n    person_and_images = [\n        (\"Tanmay Sawant\", \"face/images/Tanmay_Sawant/\"),\n        (\"Rohan Sawant\", \"face/images/Rohan_Sawant/\"),\n        (\"Peter Hook\", \"face/images/Peter_Hook/\"),\n    ]\n    person_id_dict = {}\n    for person_name, images_dir in person_and_images:\n        name, id = add_person(person_name,\n                              [os.path.join(images_dir, f) for f in\n                               os.listdir(images_dir)],\n                              person_group=person_group)\n        person_id_dict[id] = name\n    pprint(person_id_dict)\n\n    # image_urls = os.listdir(\"images/Rohan_Sawant/train\")\n\n\ndef main():\n    image_file = CURRENT_IMAGE_FILE\n    person_group_id = DEFAULT_PERSON_GROUP\n    person_lookup_dict = {'810202ff-6f6b-4582-a326-6c7f97eb67ad': 'Rohan Sawant',\n                          'c09e9880-8dfc-4957-8ac2-a189b64f8674': 'Peter Hook',\n                          'e376f233-70b3-4b70-a30e-44b7990192bd': 'Tanmay Sawant'}\n\n    camera = Camera()\n    face_api_wrapper = FaceAPIWrapper(FACE_API_KEY, FACE_BASE_URL)\n\n    # Create a generic client api object\n    api = drest.API(REST_SERVER_URL)\n\n    response = api.make_request('GET', '/students')\n    pprint(response.data)\n\n    # Or attach a resource\n    api.add_resource('students')\n\n    # Get available resources\n    print(\"API Resources:\")\n    pprint(api.students.get().data)\n\n    # Get all objects of a resource\n    # GET http://localhost:8000/api/v1/users/\n    response = api.students.get()\n\n    while 1:\n        print(\"Capturing Image every \", CAPTURE_INTERVAL)\n        image = camera.capture_image()\n        cv2.imwrite(image_file, image)\n        cv2.imshow(\"Camera Image\", image)\n        cv2.waitKey(1)\n        face_ids = face_api_wrapper.detect_faces(image=image_file)\n        if face_ids:\n            identified_person_id = \\\n                face_api_wrapper.identify_face(face_ids=face_ids,\n                                               large_person_group=person_group_id)\n            if identified_person_id:\n                try:\n                    person_name = 
person_lookup_dict[identified_person_id]\n                except KeyError:  # dict lookups raise KeyError, not IndexError\n                    person_name = None\n                print(\"Person in camera is\", person_name)\n        time.sleep(CAPTURE_INTERVAL)\n\n\nif __name__ == '__main__':\n    # initial_setup()\n    main()\n", "sub_path": "code/raspberry-pi-client/run_camera_client.py", "file_name": "run_camera_client.py", "file_ext": "py", "file_size_in_byte": 3633, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "CONSTANTS.DEFAULT_PERSON_GROUP", "line_number": 14, "usage_type": "name"}, {"api_name": "face.FaceAPIWrapper.FaceAPIWrapper", "line_number": 17, "usage_type": "call"}, {"api_name": "CONSTANTS.FACE_API_KEY", "line_number": 17, "usage_type": "argument"}, {"api_name": "CONSTANTS.FACE_BASE_URL", "line_number": 17, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "CONSTANTS.DEFAULT_PERSON_GROUP", "line_number": 33, "usage_type": "name"}, {"api_name": "face.FaceAPIWrapper.FaceAPIWrapper", "line_number": 35, "usage_type": "call"}, {"api_name": "CONSTANTS.FACE_API_KEY", "line_number": 35, "usage_type": "argument"}, {"api_name": "CONSTANTS.FACE_BASE_URL", "line_number": 35, "usage_type": "argument"}, {"api_name": "pprint.pprint", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 53, "usage_type": "call"}, {"api_name": "CONSTANTS.CURRENT_IMAGE_FILE", "line_number": 59, "usage_type": "name"}, {"api_name": "CONSTANTS.DEFAULT_PERSON_GROUP", "line_number": 60, "usage_type": "name"}, {"api_name": "camera.Camera", "line_number": 65, "usage_type": "name"}, {"api_name": "camera.Camera.Camera", "line_number": 65, "usage_type": "call"}, {"api_name": "face.FaceAPIWrapper.FaceAPIWrapper", "line_number": 66, "usage_type": "call"}, {"api_name": "CONSTANTS.FACE_API_KEY", "line_number": 66, "usage_type": "argument"}, {"api_name": "CONSTANTS.FACE_BASE_URL", "line_number": 66, "usage_type": "argument"}, {"api_name": "drest.API", "line_number": 69, "usage_type": "call"}, {"api_name": "CONSTANTS.REST_SERVER_URL", "line_number": 69, "usage_type": "argument"}, {"api_name": "pprint.pprint", "line_number": 72, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 79, "usage_type": "call"}, {"api_name": "CONSTANTS.CAPTURE_INTERVAL", "line_number": 86, "usage_type": "argument"}, {"api_name": "camera.Camera.capture_image", "line_number": 87, "usage_type": "call"}, {"api_name": "camera.Camera", "line_number": 87, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 90, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 102, "usage_type": "call"}, {"api_name": "CONSTANTS.CAPTURE_INTERVAL", "line_number": 102, "usage_type": "argument"}]}
+{"seq_id": "493292822", "text": "import numpy as np\nfrom collections import defaultdict\nfrom warnings import warn\n\n\nlinear = lambda a, b: np.dot(a, b)\n\n\ndef gaussian(a, b, variance, sigma=None):\n    \"\"\"\n    Computes the Gaussian similarity measure between the given arrays a and b, with\n    the sd specified by sigma or variance specified directly.\n    \"\"\"\n    ssq = variance if sigma is None else sigma**2\n    \n    return np.exp(-1*np.linalg.norm(a-b)**2/(2*ssq))\n\n\npolynomial = lambda a, b, deg=3, intercept=0.0: np.dot(a, b)**deg+intercept\n\n\ndef spectrum(a, b, k, sim_fn=None, **sf_params):\n    \"\"\"\n    Computes the spectrum kernel, which is a similarity measure of two sequences\n    based on the number of occurrences of subausequences of a given length in each one,\n    without explicitly mapping them to the feature space.\n\n    Parameters\n    ----------\n    a, b: numpy.ndarray or list\n    k: int\n        Length of subsequences\n    sim_fn: callable\n        Function to use to calculate the similarity measure between the calculated frequency\n        vectors. The function must take either dictionaries representing sparse vectors or numpy\n        arrays as inputs; if this argument is not specified, the inner\n        product (dot product) is used.\n    \"\"\"\n    assert(len(a) == len(b))\n    count_a, count_b = defaultdict(int), defaultdict(int)\n    for p in range(len(a)-k+1):\n        count_a[a[p:p+k]] += 1\n        count_b[b[p:p+k]] += 1\n\n    if sim_fn is None:\n        res = 0\n        for subseq, freq in count_a.items():\n            if subseq in count_b:\n                res += freq*count_b[subseq]\n    else:\n        try:\n            res = sim_fn(count_a, count_b, **sf_params)\n        except:\n            try:\n                # see if the function will take compressed sparse row matrices\n                from scipy.sparse import coo_matrix, csr_matrix\n\n                subsequences = np.unique(np.array(list(count_a.keys())+list(count_b.keys())))\n                coords = {}\n                coords.update((seq, i) for i, seq in enumerate(subsequences))\n                \n                def _tosparse(fdict):\n                    rows, cols, freqs = [], [], []\n                    for k, v in fdict.items():\n                        rows.append(1)\n                        cols.append(coords[k])\n                        freqs.append(v)\n\n                    return csr_matrix(coo_matrix((freqs, (rows, cols))))\n\n                res = sim_fn(_tosparse(count_a), _tosparse(count_b), **sf_params)\n\n            except:\n                # try with numpy arrays\n                warn(f\"\"\"Provided similarity function {sim_fn.__name__} doesn't support sparse vector representations - trying with dense numpy arrays.\nThis can be very slow for long sequences with high-dimensional frequency spaces\"\"\", RuntimeWarning)\n                try:\n                    def to_np(fdict):\n                        l = []\n                        for s, i in coords.items():\n                            try:\n                                l.append(fdict[s])\n                            except KeyError:\n                                l.append(0)\n\n                        return np.array(l)\n\n                    res = sim_fn(to_np(count_a), to_np(count_b), **sf_params)\n                except:\n                    raise TypeError(\"\"\"Similarity function must take dictionary representations of sparse matrices, scipy.sparse.csr_matrix objects\nor numpy 
arrays\"\"\")\n\n    return res             \n                    \n                \n\n", "sub_path": "kernels.py", "file_name": "kernels.py", "file_ext": "py", "file_size_in_byte": 3477, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "numpy.dot", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 19, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 69, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 69, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}]}
+{"seq_id": "7587170", "text": "#Autor: Pablo gullith\n#Bibliotecas\nfrom scipy import sin, cos, array, arange, sqrt\nfrom pylab import plot, show, xlabel, ylabel\n\n#Constantes\nC = 2\nl = 0.1\ntheta_0 = 0.0\nomega_0 = 0.0\nt_0 = 0.0\nt_max = 100\ng = 9.81\nN = 5000\nh = (t_max - t_0) / N\n\ndef f(r, t, Omega):\n    theta = r[0]\n    omega = r[1]\n    return array([omega, -(g / l) * sin(theta) + C * cos(theta) * sin(Omega * t)], float)\n\n\ndef theta(Omega):\n    pontos_t = arange(t_0, t_max, h)\n    pontos_theta = []\n    r = array([theta_0, omega_0], float)\n    for t in pontos_t:\n        pontos_theta.append(r[0])\n        k1 = h * f(r, t, Omega)\n        k2 = h * f(r + 0.5 * k1, t + 0.5 * h, Omega)\n        k3 = h * f(r + 0.5 * k2, t + 0.5 * h, Omega)\n        k4 = h * f(r + k3, t + h, Omega)\n        r += (k1 + 2 * k2 + 2 * k3 + k4) / 6\n\n    return pontos_theta\n\n\npontos_t = arange(t_0, t_max, h)\nplot(pontos_t, theta(5), 'c')\nshow()\nplot(pontos_t, theta(sqrt(g/l)), 'c')\nshow()", "sub_path": "Questao1.py", "file_name": "Questao1.py", "file_ext": "py", "file_size_in_byte": 932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "scipy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.sin", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.cos", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.arange", "line_number": 38, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 40, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.sqrt", "line_number": 41, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "620337295", "text": "import copy\nimport dataclasses\nfrom lxml import etree\nimport re\nfrom svg_meta import svgns\nimport svg_pathops\nfrom svg_types import *\n\n_ELEMENT_CLASSES = {\n  'circle': SVGCircle,\n  'ellipse': SVGEllipse,\n  'line': SVGLine,\n  'path': SVGPath,\n  'polygon': SVGPolygon,\n  'polyline': SVGPolyline,\n  'rect': SVGRect,\n}\n_CLASS_ELEMENTS = {v: f'{{{svgns()}}}{k}' for k, v in _ELEMENT_CLASSES.items()}\n_ELEMENT_CLASSES.update({f'{{{svgns()}}}{k}': v for k, v in _ELEMENT_CLASSES.items()})\n\n_OMIT_FIELD_IF_BLANK = { f.name for f in dataclasses.fields(SVGShape) }\n\n_ATTR_RENAMES = {\n  'clip-path': 'clip_path'\n}\n_FIELD_RENAMES = {v: k for k, v in _ATTR_RENAMES.items()}\n\ndef _el_to_data(el):\n  if el.tag not in _ELEMENT_CLASSES:\n    raise ValueError(f'Bad tag <{el.tag}>')\n  data_type = _ELEMENT_CLASSES[el.tag]\n  args = {f.name: f.type(el.attrib[_FIELD_RENAMES.get(f.name, f.name)])\n          for f in dataclasses.fields(data_type)\n          if _FIELD_RENAMES.get(f.name, f.name) in el.attrib}\n  return data_type(**args)\n\ndef _data_to_el(data_obj):\n  el = etree.Element(_CLASS_ELEMENTS[type(data_obj)])\n  for field_name, field_value in dataclasses.asdict(data_obj).items():\n    if field_name in _OMIT_FIELD_IF_BLANK and not field_value:\n      continue\n    el.attrib[_FIELD_RENAMES.get(field_name, field_name)] = str(field_value)\n  return el\n\ndef _apply_swaps(svg_root, swaps):\n  for old_el, new_el in swaps:\n    parent = old_el.getparent()\n    old_el.getparent().replace(old_el, new_el)\n\ndef shape_to_path(shape):\n  svg_root = _etree(shape, duplicate=False)\n  data_obj = _el_to_data(svg_root)\n  return data_obj.as_path()\n\nclass SVG:\n  def __init__(self, svg_root):\n    self.svg_root = svg_root\n    self.elements = None\n\n  def _elements(self):\n    if self.elements:\n      return self.elements\n    elements = []\n    for el in self.svg_root.iter('*'):\n      if el.tag not in _ELEMENT_CLASSES:\n        continue\n      elements.append((el, _el_to_data(el)))\n    self.elements = elements\n    return self.elements\n\n  def shapes(self):\n    return tuple(s for (_, s) in self._elements())\n\n  def shapes_to_paths(self, inplace=False):\n    \"\"\"Converts all basic shapes to their equivalent path.\"\"\"\n    if not inplace:\n      svg = SVG(copy.deepcopy(self.svg_root))\n      svg.shapes_to_paths(inplace=True)\n      return svg\n\n    swaps = []\n    for idx, (el, shape) in enumerate(self._elements()):\n      self.elements[idx] = (el, shape.as_path())\n    return self\n\n  def _resolve_url(self, url, el_tag):\n    match = re.match(r'^url[(]#([\\w-]+)[)]$', url)\n    if not match:\n      raise ValueError(f'Unrecognized url \"{url}\"')\n    xpath = f'//svg:{el_tag}[@id=\"{match.group(1)}\"]'\n    els = self.svg_root.xpath(xpath, namespaces={'svg': svgns()})\n    if len(els) != 1:\n      raise ValueError(f'Need exactly 1 match for {xpath}, got {len(els)}')\n    return els[0]\n\n  def apply_clip_paths(self, inplace=False):\n    \"\"\"Apply clipping to shapes and remove the clip paths.\"\"\"\n    if not inplace:\n      svg = SVG(copy.deepcopy(self.svg_root))\n      svg.apply_clip_paths(inplace=True)\n      return svg\n\n    self._update_etree()\n\n    # find elements with clip paths\n    clips = []  # 2-tuples of element index, clip path to apply\n    clip_path_els = []\n    for idx, (el, shape) in enumerate(self._elements()):\n      if not shape.clip_path:\n        continue\n      clip_path_els.append(self._resolve_url(shape.clip_path, 
'clipPath'))\n\n      # union all the shapes under the clipPath\n      # TODO what if the clip path contained non-shapes\n      clip_path = svg_pathops.union(*[_el_to_data(el) for el in clip_path_els[-1]])\n      clips.append((idx, clip_path))\n\n    # TODO handle inherited clipping\n    # https://www.w3.org/TR/SVG11/masking.html#EstablishingANewClippingPath\n\n    # apply clip path to target\n    for el_idx, clip_path in clips:\n      el, target = self.elements[el_idx]\n      target = (target.as_path()\n                .absolute(inplace=True))\n\n      target.d = svg_pathops.intersection(target, clip_path).d\n      target.clip_path = ''\n      self.elements[el_idx] = (el, target)\n\n    # destroy the clip path elements\n    for clip_path_el in clip_path_els:\n      clip_path_el.getparent().remove(clip_path_el)\n\n    # TODO destroy clip path container if now empty\n    # TODO destroy paths that are now empty?\n\n    return self\n\n  def _update_etree(self):\n    if not self.elements:\n      return\n    swaps = []\n    for old_el, shape in self.elements:\n      swaps.append((old_el, _data_to_el(shape)))\n    for old_el, new_el in swaps:\n      parent = old_el.getparent()\n      old_el.getparent().replace(old_el, new_el)\n    self.elements = None\n\n  def toetree(self):\n    self._update_etree()\n    return copy.deepcopy(self.svg_root)\n\n  def tostring(self):\n    self._update_etree()\n    return etree.tostring(self.svg_root)\n\n  def fromstring(string):\n    return SVG(etree.fromstring(string))\n\n  def parse(file_or_path):\n    return SVG(etree.parse(file_or_path))\n", "sub_path": "svg/svg.py", "file_name": "svg.py", "file_ext": "py", "file_size_in_byte": 4925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "svg_meta.svgns", "line_number": 18, "usage_type": "call"}, {"api_name": "svg_meta.svgns", "line_number": 19, "usage_type": "call"}, {"api_name": "dataclasses.fields", "line_number": 21, "usage_type": "call"}, {"api_name": "dataclasses.fields", "line_number": 33, "usage_type": "call"}, {"api_name": "lxml.etree.Element", "line_number": 38, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 38, "usage_type": "name"}, {"api_name": "dataclasses.asdict", "line_number": 39, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 77, "usage_type": "call"}, {"api_name": "re.match", "line_number": 87, "usage_type": "call"}, {"api_name": "svg_meta.svgns", "line_number": 91, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 99, "usage_type": "call"}, {"api_name": "svg_pathops.union", "line_number": 115, "usage_type": "call"}, {"api_name": "svg_pathops.intersection", "line_number": 127, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 153, "usage_type": "call"}, {"api_name": "lxml.etree.tostring", "line_number": 157, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 157, "usage_type": "name"}, {"api_name": "lxml.etree.fromstring", "line_number": 160, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 160, "usage_type": "name"}, {"api_name": "lxml.etree.parse", "line_number": 163, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 163, "usage_type": "name"}]}
+{"seq_id": "365374249", "text": "#%%\n\nimport numpy as np\nfrom os import listdir\nimport re, string, pickle\nfrom os.path import isfile, join\nfrom keras.utils import to_categorical\nfrom PIL import ImageFont, ImageDraw, Image\nfrom sklearn.preprocessing import LabelEncoder\n\n#%%\n\nROOT_DIR = '.'\nDATA_DIR = join(ROOT_DIR, '..', 'data')\nSRC_DIR = ROOT_DIR\nIMG_DIR = join(ROOT_DIR,'..', 'data', 'images')\n\n#%%\n\n# load font files\nfont_files = [f for f in listdir(join(DATA_DIR, 'fonts')) if isfile(join(DATA_DIR, 'fonts', f)) and re.search('.(o|t)tf$', f)]\n\n#%%\n\n# image height/width\nINPUT_H = INPUT_W = 64\n\n# printable character classes\nCHARACTER_CLASSES = string.printable[:-6]\n\n#%%\n\nFONTSIZE_RANGE = [30, 45]\n\nIMAGE_DATA = []\n\nsave_image = 0\n\n# flag to save generated character images (.jpg files)\n\nfor file in font_files:\n    print(\"[INFO] Generating images for font >>\", file)\n\n    file_name, _ = file.split('.')\n    font_tags = file_name.split('_')\n\n    for font_size in FONTSIZE_RANGE:\n        print(\"\\t[INFO] Generating images for font size >>\", font_size)\n\n        # load font file\n        font = ImageFont.truetype(join(DATA_DIR, 'fonts', file), font_size)\n\n        for index, character in enumerate(CHARACTER_CLASSES):\n\n            image = Image.new('L', (INPUT_W, INPUT_H), color='white')\n            draw = ImageDraw.Draw(image)\n\n            # pre-evaluate font width/height for centering purpose\n            w, h = draw.textsize(character, font = font)\n\n            # offset positions for centered font characters\n            x_offset = (INPUT_W-w)/2\n            y_offset = (INPUT_H-h)/2\n\n            draw.text((x_offset, y_offset), character, font = font)\n\n            if save_image:\n                image_path = join(IMG_DIR, '_'.join([file_name, str(index), str(font_size) + '.jpg']))\n                image.save(image_path)\n\n\n            IMAGE_DATA.append([\n                file_name,\n                font_tags[0],\n                character,\n                int('B' in font_tags),\n                int('I' in font_tags),\n                np.array(image).reshape((64,64,1))\n            ])\n\n#%%\n\nfont_labels = []\nchar_labels = []\nbold_labels = []\nitalics_labels = []\n\nfor data in IMAGE_DATA:\n    font, char, bold, italics = data[1:5]\n    font_labels.append(font)\n    char_labels.append(char)\n    bold_labels.append(bold)\n    italics_labels.append(italics)\n\ndef one_hot_encoder(class_inputs):\n    integer_encoder = LabelEncoder()\n    encoded_inputs = integer_encoder.fit_transform(class_inputs)\n    encoded_inputs = encoded_inputs.reshape(len(encoded_inputs), 1)\n\n    return [integer_encoder, encoded_inputs]\n\n# encode fonts\nfont_encoder, encoded_fonts = one_hot_encoder(font_labels)\n\n# encode fonts\nchar_encoder, encoded_char = one_hot_encoder(char_labels)\n\n# encode fonts\nbold_encoder, encoded_bold = one_hot_encoder(bold_labels)\n\n# encode fonts\nitalics_encoder, encoded_italics = one_hot_encoder(italics_labels)\n\n\n# one hot encode\nencoded_font_labels = to_categorical(encoded_fonts)\nencoded_character_labels = to_categorical(encoded_char)\nencoded_bold_labels = to_categorical(encoded_bold)\nencoded_italics_labels = to_categorical(encoded_italics)\n\n\nTRAIN_LABELS = []\n\nfor i in range(len(IMAGE_DATA)):\n    TRAIN_LABELS.append([\n        IMAGE_DATA[i][5],\n        encoded_font_labels[i],\n        encoded_character_labels[i],\n        encoded_bold_labels[i],\n        encoded_italics_labels[i]\n    ])\n\n#%%\n# dump 
label_encoder\npickle.dump(\n    [font_encoder, char_encoder, bold_encoder, italics_encoder],\n    open(join(SRC_DIR, 'label_encoders.p'), 'wb')\n)\n\nnp.save(join(SRC_DIR, 'train_data.npy'), np.array(TRAIN_LABELS))\nprint('train_data.npy')\n\n#%%\n", "sub_path": "src/data_generator.py", "file_name": "data_generator.py", "file_ext": "py", "file_size_in_byte": 3619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.search", "line_number": 21, "usage_type": "call"}, {"api_name": "string.printable", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 51, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 55, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 96, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 119, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}]}
+{"seq_id": "113483885", "text": "\"\"\"\nThis preoduces the data for the example of Nexa for wall street with columns\n\"\"\"\n\nimport numpy as np\n\nfrom inputs.sensors import Sensor, PerceptualSpace\nfrom inputs.lag_structure import LagStructure\nfrom nexa.nexa import Nexa\nfrom nexa.saving import NexaSaverHDF5\nimport h5py\n\nsignal_location = './data/wall_street_data.hdf5'\nsignal_location = './data/wall_street_data_spaces.hdf5'\nsignal_location = './data/wall_street_data_30.hdf5'\n\n# Access the data and load it into signal\nwith h5py.File(signal_location, 'r') as f:\n    dset = f['signal']\n    signals = np.empty(dset.shape, np.float)\n    dset.read_direct(signals)\n\n\n# Get the data and copy it\nNdata = signals.shape[0]\nNside = signals.shape[1]\nNdata = 15000\nsignals = signals[:Ndata, ...]\nsignals_columns = signals.swapaxes(1, 2).reshape(Ndata * Nside, Nside)\nsignals_columns += np.random.uniform(size=signals_columns.shape)\nprint('zeros', np.sum(signals_columns[0] == 0))\nprint('signals shape', signals_columns.shape)\n\n# Now we need the nexa thing\ndt = 1.0\nmax_lag = 10\nlag_times = np.arange(0, max_lag, 1)\nwindow_size = signals_columns.shape[0] - (lag_times[-1] + 1)\nweights = None\n\nlag_structure = LagStructure(lag_times=lag_times, weights=weights, window_size=window_size)\nsensors = [Sensor(signal, dt, lag_structure) for signal in signals_columns.T]\nperceptual_space = PerceptualSpace(sensors, lag_first=True)\n\nNside_aux = 30  # The side of the image\nindex_to_cluster = np.zeros(lag_times.size * Nside_aux)\nfor index in range(index_to_cluster.size):\n    index_to_cluster[index] = index % max_lag\n\nNtime_clusters = 3\nNspatial_clusters = int(max_lag)\nNembedding = 3\n\nNtime_clusters_set = np.arange(10, 25, 5, dtype=int)\n\nfor Ntime_clusters in Ntime_clusters_set:\n    print('------------------')\n    print('Ntime clusters', Ntime_clusters, Ntime_clusters_set.size)\n    # Get the normal nexa object\n    nexa_object = Nexa(perceptual_space, Nspatial_clusters, Ntime_clusters, Nembedding)\n\n    nexa_object.calculate_distance_matrix()\n    print('STDM shape', nexa_object.STDM.shape)\n    print('Distance matrix calculated')\n    nexa_object.calculate_embedding()\n    print('Embedding calculated')\n    nexa_object.calculate_spatial_clustering()\n    print('Spatial clustering calculated')\n    nexa_object.calculate_cluster_to_indexes()\n    print('Cluster to index calculated')\n    nexa_object.calculate_time_clusters()\n    print('Time clusters calculated')\n\n    # Open the saver \n    data_base_name = 'text_wall_street_columns_30'\n    saver = NexaSaverHDF5(data_base_name, 'a')\n    # Save \n    run_name = 'test'\n    saver.save_complete_run(nexa_object, run_name)\n    print('Saved Mix')\n\n    # Get the independent nexa object\n    nexa_object = Nexa(perceptual_space, Nspatial_clusters, Ntime_clusters, Nembedding)\n\n    nexa_object.calculate_distance_matrix()\n    print('STDM shape', nexa_object.STDM.shape)\n    print('Distance matrix calculated')\n    nexa_object.index_to_cluster = index_to_cluster\n    print('Spatial clustering calculated')\n    nexa_object.calculate_cluster_to_indexes()\n    print('Cluster to index calculated')\n    nexa_object.calculate_time_clusters_indp()\n    print('Time clusters calculated')\n\n    # Open the saver \n    data_base_name = 'text_wall_street_columns_30'\n    saver = NexaSaverHDF5(data_base_name, 'a')\n    # Save \n    run_name = 'indep'\n    saver.save_complete_run(nexa_object, run_name)\n    print('Saved Independent')\n", 
"sub_path": "nexa_for_wall_street_columns.py", "file_name": "nexa_for_wall_street_columns.py", "file_ext": "py", "file_size_in_byte": 3403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "h5py.File", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 37, "usage_type": "call"}, {"api_name": "inputs.lag_structure.LagStructure", "line_number": 41, "usage_type": "call"}, {"api_name": "inputs.sensors.Sensor", "line_number": 42, "usage_type": "call"}, {"api_name": "inputs.sensors.PerceptualSpace", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "nexa.nexa.Nexa", "line_number": 60, "usage_type": "call"}, {"api_name": "nexa.saving.NexaSaverHDF5", "line_number": 76, "usage_type": "call"}, {"api_name": "nexa.nexa.Nexa", "line_number": 83, "usage_type": "call"}, {"api_name": "nexa.saving.NexaSaverHDF5", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "518834470", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n   File Name:     日内回转交易\n   Description :\n   Author :       haoyuan.m\n   date:          2018/10/16\n-------------------------------------------------\n   Change Activity:\n                   2018/10/16:\n-------------------------------------------------\n\"\"\"\n\n__author__ = 'haoyuan.m'\n\n'''\n本策略首先买入SHSE.600000股票10000股\n随后根据60s的数据计算MACD(12,26,9),\n在MACD>0的时候买入100股;在MACD<0的时候卖出100股\n但每日操作的股票数不超过原有仓位,并于收盘前把仓位调整至开盘前的仓位\n回测数据为:SHSE.600000的60s数据\n回测时间为:2017-09-01 08:00:00到2017-10-01 16:00:00\n'''\n\nfrom atrader import *\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport sys\n\ntry:\n    import talib\nexcept:\n    print('请安装TA-Lib库')\n    sys.exit(-1)\n\n\ndef init(context):\n    # 用于判定第一个仓位是否成功开仓\n    context.first = 0\n    set_backtest(initial_cash=100000, stock_cost_fee=0.1)\n    reg_kdata('min', 1)\n    # 日内回转每次交易100股\n    context.trade_n = 100\n    # 获取昨今天的时间\n    context.day = [0, 0]\n    # 用于判断是否触发了回转逻辑的计时\n    context.ending = 0\n    # 需要保持的总仓位\n    context.total = 10000\n\ndef on_data(context):\n    bar = get_current_bar()\n    if context.first == 0:\n        # 最开始配置仓位\n        # 购买10000股浦发银行股票\n        order_volume(account_idx=0, target_idx=0, volume=context.total, side=1, position_effect=1, order_type=2,\n                     price=0)\n        print(context.now, context.target_list[0], '以市价单开多仓10000股')\n        context.first = 1.\n        day = bar.time_bar.iloc[0]\n        context.day[-1] = day.day\n        # 每天的仓位操作\n        context.turnaround = [0, 0]\n        return\n\n    # 更新最新的日期\n    day = bar.time_bar.iloc[0]\n    context.day[0] = day.day\n    # 若为新的一天,则重置标记信息。\n    if context.day[0] != context.day[-1]:\n        context.ending = 0\n        context.turnaround = [0, 0]\n    # 如果一天结束,则\n    if context.ending == 1:\n        return\n    # 若有可用的昨仓则操作\n    if context.total >= 0:\n        # 获取时间序列数据\n        recent_data = get_reg_kdata(reg_idx=context.reg_kdata[0], length=35, fill_up=True, df=True).close\n        if recent_data.isna().any():\n            return\n        macd = talib.MACD(recent_data.astype(float))[2].iloc[-1]\n        # 根据MACD>0则开仓,小于0则平仓\n        if macd > 0:\n\n            # 多空单向操作都不能超过昨仓位,否则最后无法调回原仓位\n            if (context.turnaround[0] + context.trade_n) < context.total:\n                # 计算累计仓位\n                context.turnaround[0] += context.trade_n\n                order_volume(account_idx=0, target_idx=0, volume=context.trade_n, side=1, position_effect=1,\n                             order_type=2, price=0)\n                print(context.now, context.target_list[0], '市价单开多仓', context.trade_n, '股')\n        elif macd < 0:\n            if (context.turnaround[1] + context.trade_n) < context.total:\n                context.turnaround[1] += context.trade_n\n                order_volume(account_idx=0, target_idx=0, volume=context.trade_n, side=2, position_effect=2,\n                             order_type=2, price=0)\n                print(context.now, context.target_list[0], '市价单平多仓', context.trade_n, '股')\n        # 临近收盘时若仓位数不等于昨仓则回转所有仓位\n        if (day.strftime('%Y-%m-%d %H:%M:%S')[11:16] == '14:55') or (\n                day.strftime('%Y-%m-%d %H:%M:%S')[11:16] == '14:57'):\n            position = context.account().positions['volume_long'][0]\n            if position != context.total:\n                order_target_volume(account_idx=0, target_idx=0, target_volume=context.total, side=1,\n                          
          order_type=2, price=0)\n                print('市价单回转仓位操作...')\n                context.ending = 1\n        # 更新过去的日期数据\n        context.day[-1] = context.day[0]\n\n\nif __name__ == '__main__':\n    recent_data = ['SSE.600000']\n    run_backtest('日内回转交易', '日内回转交易.py', target_list=recent_data, frequency='min', fre_num=1, begin_date='2018-08-01',\n                 end_date='2018-10-01', fq=1)\n", "sub_path": "掘金示例/日内回转交易.py", "file_name": "日内回转交易.py", "file_ext": "py", "file_size_in_byte": 4467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}, {"api_name": "talib.MACD", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "188781548", "text": "from functools import total_ordering\nfrom typing import Type, TypeVar, Tuple, List, Dict, Any\n\nfrom connectrum.svr_info import ServerInfo\nfrom connectrum.constants import DEFAULT_PORTS\n\nfrom pycoin.tx.Tx import Tx\nfrom pycoin.tx.TxOut import TxOut\nfrom pycoin.tx.Spendable import Spendable\nfrom pycoin.tx.pay_to.ScriptPayToAddressWit import ScriptPayToAddressWit\nfrom pycoin.key.BIP32Node import BIP32Node\nfrom pycoin.ui import address_for_pay_to_script\nfrom pycoin.encoding import hash160\nfrom pycoin.networks import bech32_hrp_for_netcode\nfrom pycoin.contrib import segwit_addr\n\nfrom pycoin.serialize import b2h\n\nclass MyServerInfo(ServerInfo):\n    def get_port(self, for_protocol: str) -> Tuple[str, int, bool]:\n        '''\n            Return (hostname, port number, ssl) pair for the protocol.\n            Assuming only one port per host.\n        '''\n        assert len(for_protocol) == 1, \"expect single letter code\"\n        rv = [i for i in self['ports'] if i[0] == for_protocol]\n        port = None\n        if len(rv) < 2:\n            try:\n                port = int(rv[0][1:])\n            except Exception:\n                pass\n        port = port or DEFAULT_PORTS[for_protocol]\n        use_ssl = for_protocol in ('s', 'g')\n        return self['hostname'], port, use_ssl\n\n@total_ordering\nclass LexTxOut(TxOut):\n    T = TypeVar(\"T\", bound=\"LexTxOut\")\n    @staticmethod\n    def demote(lexout: Type[T]) -> TxOut:\n        return TxOut(lexout.coin_value, lexout.script)\n\n    def __eq__(self, other) -> bool:\n        return (self.coin_value, b2h(self.script)) == \\\n            (other.coin_value, b2h(other.script))\n\n    def __lt__(self: T, other: Type[T]) -> bool:\n        return (self.coin_value, b2h(self.script)) < \\\n            (other.coin_value, b2h(other.script))\n\n@total_ordering\nclass LexSpendable(Spendable):\n    T = TypeVar(\"T\", bound=\"LexSpendable\")\n    @classmethod\n    def promote(cls: Type[T], spendable: Spendable) -> T:\n        return cls.from_dict(spendable.as_dict())\n\n    def __eq__(self, other) -> bool:\n        self_dict = self.as_dict()  # type: Dict[str, Any]\n        other_dict = other.as_dict()  # type: Dict[str, Any]\n        return (self_dict[\"tx_hash_hex\"], self_dict[\"tx_out_index\"]) == \\\n            (other_dict[\"tx_hash_hex\"], other_dict[\"tx_out_index\"])\n\n    def __lt__(self: T, other: Type[T]) -> bool:\n        self_dict = self.as_dict()  # type: Dict[str, Any]\n        other_dict = other.as_dict()  # type: Dict[str, Any]\n        return (self_dict[\"tx_hash_hex\"], self_dict[\"tx_out_index\"]) < \\\n            (other_dict[\"tx_hash_hex\"], other_dict[\"tx_out_index\"])\n\nclass SegwitBIP32Node(BIP32Node):\n    def bech32_p2wpkh_address(self) -> str:\n        hrp = bech32_hrp_for_netcode(self.netcode())\n        witprog_version = 1\n        p2aw_script = self.p2wpkh_script()\n        return segwit_addr.encode(hrp, witprog_version, p2aw_script)\n\n    def p2sh_p2wpkh_address(self) -> str:\n        p2aw_script = self.p2wpkh_script()  # type: bytes\n        return address_for_pay_to_script(p2aw_script, netcode=self.netcode())\n\n    def p2wpkh_script_hash(self) -> bytes:\n        p2aw_script = self.p2wpkh_script()  # type: bytes\n        return hash160(p2aw_script)\n\n    def p2wpkh_script(self) -> bytes:\n        hash160_c = self.hash160(use_uncompressed=False)  # type: bytes\n        return ScriptPayToAddressWit(b'\\0', hash160_c).script()\n\ndef main():\n    svr = 
MyServerInfo(\"onion\",\n                       hostname=\"fdkhv2bb7hqel2e7.onion\",\n                       ports=12345)  # type: MyServerInfo\n    print(svr.get_port(\"t\"))\n\n    hex_ = [\n        \"01000000014f2eae2eadabe4e807fad4220a931991590ae31f223ba70bf1\",\n        \"8dd16983005441010000006b483045022100ab33f14e1c3387b68942e1ab\",\n        \"bd4ec0e2d94866529409464e262531c165cc75f0022034482cd3031bb779\",\n        \"852baaedae91c43b61c84ca3eecad6220e91c24e1227e30a0121022798d6\",\n        \"f62e0c4d01c16ef51599e9d9d60048f3930c03f0da8681b1884ce2b411ff\",\n        \"ffffff02873e0800000000001976a914c8f91ed83b0e345751e62e392be8\",\n        \"be0494d0617b88ac538e4c39000000001976a9149b004c3bdcfaa929c336\",\n        \"8d221deb26303d7e72c788ac00000000\"]  # type: List[str]\n    tx_hex = \"\".join(hex_)  # type: str\n    tx = Tx.from_hex(tx_hex)  # type: Tx\n\n    utxos = [LexSpendable.promote(utxo)\n             for utxo in tx.tx_outs_as_spendable()]  # type: List[LexSpendable]\n    utxos.sort()\n    print(utxos)\n\n    txouts = [LexTxOut(txout.coin_value, txout.script)\n              for txout in tx.txs_out]  # type: List[LexTxOut]\n    txouts.sort()\n    print([str(txout) for txout in txouts])\n\n    secret = \"CORRECT HORSE BATTERY STAPLE\"  # type: str\n    mpk = SegwitBIP32Node.from_master_secret(\n        secret.encode(\"utf-8\"))  # type: SegwitBIP32Node\n    print(mpk.p2sh_p2wpkh_address())\n    print(mpk.bech32_p2wpkh_address())\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "nowallet/subclasses.py", "file_name": "subclasses.py", "file_ext": "py", "file_size_in_byte": 4827, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "connectrum.svr_info.ServerInfo", "line_number": 19, "usage_type": "name"}, {"api_name": "connectrum.constants.DEFAULT_PORTS", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 20, "usage_type": "name"}, {"api_name": "pycoin.tx.TxOut.TxOut", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 39, "usage_type": "call"}, {"api_name": "typing.Type", "line_number": 41, "usage_type": "name"}, {"api_name": "pycoin.tx.TxOut.TxOut", "line_number": 42, "usage_type": "call"}, {"api_name": "pycoin.tx.TxOut.TxOut", "line_number": 41, "usage_type": "name"}, {"api_name": "pycoin.serialize.b2h", "line_number": 45, "usage_type": "call"}, {"api_name": "pycoin.serialize.b2h", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Type", "line_number": 48, "usage_type": "name"}, {"api_name": "pycoin.serialize.b2h", "line_number": 49, "usage_type": "call"}, {"api_name": "pycoin.serialize.b2h", "line_number": 50, "usage_type": "call"}, {"api_name": "functools.total_ordering", "line_number": 37, "usage_type": "name"}, {"api_name": "pycoin.tx.Spendable.Spendable", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 54, "usage_type": "call"}, {"api_name": "typing.Type", "line_number": 56, "usage_type": "name"}, {"api_name": "pycoin.tx.Spendable.Spendable", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 65, "usage_type": "name"}, {"api_name": "functools.total_ordering", "line_number": 52, "usage_type": "name"}, {"api_name": "pycoin.key.BIP32Node.BIP32Node", "line_number": 71, "usage_type": "name"}, {"api_name": "pycoin.networks.bech32_hrp_for_netcode", "line_number": 73, "usage_type": "call"}, {"api_name": "pycoin.contrib.segwit_addr.encode", 
"line_number": 76, "usage_type": "call"}, {"api_name": "pycoin.contrib.segwit_addr", "line_number": 76, "usage_type": "name"}, {"api_name": "pycoin.ui.address_for_pay_to_script", "line_number": 80, "usage_type": "call"}, {"api_name": "pycoin.encoding.hash160", "line_number": 84, "usage_type": "call"}, {"api_name": "pycoin.tx.pay_to.ScriptPayToAddressWit.ScriptPayToAddressWit", "line_number": 88, "usage_type": "call"}, {"api_name": "pycoin.tx.Tx.Tx.from_hex", "line_number": 106, "usage_type": "call"}, {"api_name": "pycoin.tx.Tx.Tx", "line_number": 106, "usage_type": "name"}]}
+{"seq_id": "100144316", "text": "# -*- coding: utf-8 -*-\nfrom sma import SMA\nfrom gevent import Greenlet, sleep\n\nclass Snapshot(object):\n    def __init__(self, sensor):\n        self.sensor = sensor\n\n    def get(self, keys):\n        data = self.sensor.read_data()\n        return dict([(k, {'data':data[k]}) for k in keys])\n\nclass StatsWorker(Greenlet):\n    def __init__(self, sensor, interval=1, num_series=10):\n        Greenlet.__init__(self)\n        self.sensor = sensor\n        self.interval = interval\n        self.data = {}\n        self._initialize_data_slot(self.sensor, self.data, num_series)\n\n    def _initialize_data_slot(self, sensor, slot, num):\n        data = sensor.read_data()\n        for k, v in data.iteritems():\n            slot[k] = SMA(num)\n\n    def _run(self):\n        while True:\n            for k, v in self.sensor.read_data().iteritems():\n                self.data[k].add(v)\n            sleep(self.interval)\n\n    def get(self, keys):\n        return dict([(k, {'data':self.data[k].average(),\n                          'sigma':self.data[k].sigma()}) for k in keys])\n\n    def __str__(self):\n        return '%s(%s)' %(self.__class__.__name__, self.sensor)\n\nif __name__ == '__main__':\n    try:\n        from bme280 import BME280\n    except ImportError:\n        from bme280 import BME280Mock as BME280\n    keys = ['T', 'P', 'H']\n    w = StatsWorker.spawn(BME280(0x76))\n    print(w)\n    try:\n        while w:\n            results = w.get(keys)\n            print(' '.join([\"%s:%.3f~%.3f\" % (k, results[k]['data'], results[k]['sigma']) for k in keys]))\n            sleep(3)\n    except KeyboardInterrupt:\n       pass \n    finally:\n        w.kill()\n        w.dead\n\n", "sub_path": "monitor.py", "file_name": "monitor.py", "file_ext": "py", "file_size_in_byte": 1641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "gevent.Greenlet", "line_number": 13, "usage_type": "name"}, {"api_name": "gevent.Greenlet.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "gevent.Greenlet", "line_number": 15, "usage_type": "name"}, {"api_name": "sma.SMA", "line_number": 24, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "bme280.BME280Mock", "line_number": 45, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "555234825", "text": "import cv2 as cv\r\n\r\ndef SinCityEffect(filename):\r\n    bgr = cv.imread(filename)\r\n    B = bgr[:, :, 0]\r\n    G = bgr[:, :, 1]\r\n    R = bgr[:, :, 2]\r\n    H = 0\r\n    for x in range(bgr.shape[0]):\r\n        for y in range(bgr.shape[1]):\r\n            Cmax = max(R[x][y], G[x][y], B[x][y])\r\n            Cmin = min(R[x][y], G[x][y], B[x][y])\r\n            Delta = (float)(Cmax - Cmin)\r\n            if Delta == 0:  # Hue\r\n                H = 0\r\n            elif Cmax == R[x][y]:\r\n                tmp = ((G[x][y] - B[x][y]) / Delta)\r\n                if tmp < 0:\r\n                    H = 60 * (tmp + 6)\r\n                else:\r\n                    H = 60 * tmp\r\n            elif Cmax == G[x][y]:\r\n                H = 60 * (((B[x][y] - R[x][y]) / Delta) + 2.0)\r\n            elif Cmax == B[x][y]:\r\n                H = 60 * (((R[x][y] - G[x][y]) / Delta) + 4.0)\r\n            print(H)\r\n            if 3 <= H <= 344:\r\n                R[x][y] = Cmin\r\n                G[x][y] = Cmin\r\n                B[x][y] = Cmin\r\n\r\n    return bgr\r\n\r\nimg = SinCityEffect('../../testImages/SinCity.jpg')\r\ncv.imshow(\"SinCity\",img)\r\ncv.waitKey(0)", "sub_path": "Programmes en Python/SinCity/SinCity.py", "file_name": "SinCity.py", "file_ext": "py", "file_size_in_byte": 1105, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "528084950", "text": "import os\nimport numpy as np\nimport time\nfrom sklearn.svm import LinearSVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport pickle\nfrom timeit import default_timer as timer\nfrom Features import *\n\n\n\n\ndef get_fileNames(path):\n\tdata = []\n\tfor root, dirs, files in os.walk(path):\n\t\tfor name in files:\n\t\t\tif name.endswith((\".jpg\", \".jpeg\",\".png\")):\n\t\t\t\tbaseName=os.path.join(root,name)\n\t\t\t\tprint(baseName)\n\t\t\t\tdata.append(baseName)\n\treturn data\n\n\ndef trainAndTest():\n\tcolor_space = 'YCrCb'\n\torient = 9  \n\tpix_per_cell = 8\n\tcell_per_block = 2\n\thog_channel = \"ALL\" \n\tspatial_size = (32, 32) \n\thist_bins = 32   \n\tspatial_feat = True \n\thist_feat = True \n\thog_feat = True\n\t\n\n\tpathPlates='/home/kafein/plateRecognition/plates/plt'\n\tplates=get_fileNames(pathPlates)\n\tprint(len(plates))\n\tpltFeatures = extractFeature(plates, color_space=color_space, \n\t\t\t               spatial_size=spatial_size, hist_bins=hist_bins, \n\t\t\t               orient=orient, pix_per_cell=pix_per_cell, \n\t\t\t               cell_per_block=cell_per_block, \n\t\t\t               hog_channel=hog_channel, spatial_feat=spatial_feat, \n\t\t\t               hist_feat=hist_feat, hog_feat=hog_feat)\n\n\tpathNotPlt='/home/kafein/plateRecognition/plates/Extras'\n\tnotPlt=get_fileNames(pathNotPlt)\n\tprint(len(notPlt))\n\tnotPltFeatures = extractFeature(notPlt, color_space=color_space, \n\t\t\t               spatial_size=spatial_size, hist_bins=hist_bins, \n\t\t\t               orient=orient, pix_per_cell=pix_per_cell, \n\t\t\t               cell_per_block=cell_per_block, \n\t\t\t               hog_channel=hog_channel, spatial_feat=spatial_feat, \n\t\t\t               hist_feat=hist_feat, hog_feat=hog_feat)\n\t\t\n\tX = np.vstack((pltFeatures, notPltFeatures)).astype(np.float64)                        \n\tX_scaler = StandardScaler().fit(X)\n\tscaled_X = X_scaler.transform(X)\n\ty = np.hstack((np.ones(len(pltFeatures)), np.zeros(len(notPltFeatures))))\n\n\trand_state = np.random.randint(0, 100)\n\tX_train, X_test, y_train, y_test = train_test_split(\n\tscaled_X, y, test_size=0.2, random_state=rand_state)\n\n\tprint('Using:',orient,'orientations',pix_per_cell,\n\t\t'pixels per cell and', cell_per_block,'cells per block')\n\tprint('Feature vector length:', len(X_train[0]))\n\t     \n\tt=time.time()\n\tsvc=LinearSVC().fit(X_train, y_train)\n\tt2 = time.time()\n\tprint(round(t2-t, 2), 'Seconds to train SVC...')\n\tscore1 = round(svc.score(X_test, y_test), 4) \n\tprint('Test Accuracy of SVC = ', score1)\n\tt=time.time()\n\n\ttree = DecisionTreeClassifier(criterion = \"entropy\", random_state = 100,\n\t \t\t\t\t\tmax_depth=3, min_samples_leaf=5)\n\ttree.fit(X_train, y_train)\n\tscore2 = round(tree.score(X_test, y_test), 4)\n\tprint('Test Accuracy of DecisionTree = ', score2)\n\n\tgauss = GaussianNB()\n\tgauss.fit(X_train, y_train)\n\tscore3 = round(gauss.score(X_test, y_test), 4)\n\tprint('Test Accuracy of Gaussian = ', score3)\n\t\n\tif score1 > score2:\n\t\tif score1 > score3:\n\t\t\tselect = svc\n\t\t\tprint('Best Accuracy = SVC')\n\t\telse:\n\t\t\tselect = gauss\n\t\t\tprint('Best Accuracy = Gaussian')\n\telse:\n\t\tif score2 > score3:\n\t\t\tselect = tree\n\t\t\tprint('Best Accuracy = DecisionTree')\n\t\telse:\n\t\t\tselect = 
gauss\n\t\t\tprint('Best Accuracy = Gaussian')\n\n\tmodelName = 'pltClassifier.p'\n\tclsModel = {}\n\tclsModel[\"model\"] = select\n\tclsModel[\"scaler\"] = X_scaler\n\tpickle.dump(clsModel, open(modelName, 'wb'))\n\tprint (\"model saved\")\n\t\ntrainAndTest()\n\n\t\t\n\n\n\t\t\n\n", "sub_path": "procesAndSelectClassifier.py", "file_name": "procesAndSelectClassifier.py", "file_ext": "py", "file_size_in_byte": 3515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.walk", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 67, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 82, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 88, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 112, "usage_type": "call"}]}
+{"seq_id": "235765842", "text": "#!C:\\Program Files\\Python37\\Python.exe\r\nprint(\"Content-type:text/html \\n\")\r\nimport cgi\r\nimport pickle\r\nf = cgi.FieldStorage()\r\ncarbrand = f.getvalue(\"carBrand\")\r\nmodelno = f.getvalue(\"modelno\")\r\ncity = f.getvalue(\"city\")\r\nyear = f.getvalue(\"year\")\r\nkm = f.getvalue(\"km\")\r\nfuel = f.getvalue(\"fuel\")\r\ntransmission = f.getvalue(\"transmission\")\r\nownertype = f.getvalue(\"ownertype\")\r\nmileage = f.getvalue(\"mileage\")\r\nengine = f.getvalue(\"engine\")\r\npower = f.getvalue(\"power\")\r\nseats = f.getvalue(\"seats\")\r\nwith open('check.pkl', 'rb') as f:\r\n    x = pickle.load(f)\r\n# predict the test set results\r\ny_pred = x.predict([[float(city), float(year), float(km), float(fuel), float(transmission), float(ownertype),\r\n                     float(mileage), float(engine), float(power), float(seats)]])\r\nredirectURL = \"http://localhost/Animesh/final.py?data={:.2f}\".format(y_pred[0])\r\nprint('<html>')\r\nprint('  <head>')\r\nprint('    <meta http-equiv=\"refresh\" content=\"0;url='+redirectURL+'\" />')\r\nprint('  </head>')\r\nprint('</html>')\r\n", "sub_path": "CarPricePrediction/CarPriceWithObject.py", "file_name": "CarPriceWithObject.py", "file_ext": "py", "file_size_in_byte": 1027, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cgi.FieldStorage", "line_number": 5, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "539940126", "text": "\"\"\"\nDecorator Module\n\"\"\"\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\n\ndef page(size=None):\n    \"\"\"\n    Pagination decorator for list views.\n    If a view function is decorated with this decorator, the view is treated as a paginated view. The value of size is determined, in order, by the URL query parameter size, a custom size, and the default size;\n    offset is determined by the URL offset parameter or the default value (0).\n    Args:\n        size: page size\n\n    Returns:\n        list\n    \"\"\"\n    # Parameter check\n    if 'PAGINATION' in settings.NAMEKO and 'SIZE' in settings.NAMEKO['PAGINATION']:\n        PAGINATION_DEFAULT_SIZE = settings.NAMEKO['PAGINATION']['SIZE']  # default page size\n    else:\n        raise ImproperlyConfigured(\n            '''Django settings `NAMEKO` must include a `PAGINATION` setting, and `PAGINATION` must include `SIZE`.'''\n        )\n    size = PAGINATION_DEFAULT_SIZE if not size else size\n\n    def decorator(func):\n        def wrapper(self, request, *args, **kwargs):\n            nonlocal size\n            if 'size' in request.GET and request.GET['size'].isnumeric():\n                if int(request.GET['size']) > 0:\n                    size = int(request.GET['size'])\n\n            if 'offset' in request.GET and request.GET['offset'].isnumeric():\n                offset = int(request.GET['offset'])     # >=0\n            else:\n                offset = 0\n\n            request_get_copy = request.GET.copy()   # request.GET itself is immutable\n            request_get_copy['offset'] = offset + size\n            next_url = request.build_absolute_uri(request.path) + '?' + request_get_copy.urlencode()\n            if offset == 0:\n                prev_url = None\n            else:\n                request_get_copy['offset'] = offset - size\n                if request_get_copy['offset'] < 0:\n                    request_get_copy['offset'] = 0\n                prev_url = request.build_absolute_uri(request.path) + '?' + request_get_copy.urlencode()\n\n            request.pagination = {'size': size, 'offset': offset, 'prev': prev_url, 'next': next_url}\n            print(request.pagination)\n            return func(self, request, *args, **kwargs)\n        return wrapper\n    return decorator\n\n", "sub_path": "nameko_django_gateway_utils/decorators.py", "file_name": "decorators.py", "file_ext": "py", "file_size_in_byte": 2197, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "django.conf.settings.NAMEKO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.settings.NAMEKO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "179153514", "text": "#This is a small application for the Tuggerah Family Festival run by Scripture Union.\n#Author: Reece Payne\n#License: I'm claiming GPL, modify as needed, but remember to remove the bits specific to\n#the Tuggerah Family Festival.\n\n#Import SQLite for our database goodness.\nimport sqlite3\nimport hashlib\nimport os.path\nfrom os import urandom, remove\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport urllib\nimport configparser\nfrom functools import wraps\n#Import a bunch of Flask stuff.\nfrom flask import Flask\nfrom flask import g\nfrom flask import request\nfrom flask import render_template\nfrom flask import session, redirect, url_for, abort, flash\n\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\nfrom flask.ext import assets\n\nfrom flask.ext.login import LoginManager, login_user , logout_user , current_user , login_required\n\nimport markdown2\n\n####Wrapper overloads####\ndef login_required(role=\"ANY\"):\n    def wrapper(fn):\n        @wraps(fn)\n        def decorated_view(*args, **kwargs):\n            if not current_user.is_authenticated():\n              return lm.unauthorized()\n            if ((current_user.role != role) and (role != \"ANY\")):\n                return lm.unauthorized()\n            return fn(*args, **kwargs)\n        return decorated_view\n    return wrapper\n\ndbname = \"mission\"\n\n#Here for demo purposes, needs to be set somewhere safer in the long term. Also, this will change for the prod version.\napp = Flask(__name__)\n\nlm = LoginManager()\nlm.init_app(app)\nlm.login_view = 'login'\n\n#Webassets for managing all the fun UI stuffs.\nenv = assets.Environment(app)\n\nenv.load_path = [\n    os.path.join(os.path.dirname(__file__), 'scss'),\n    os.path.join(os.path.dirname(__file__), 'coffee'),\n    os.path.join(os.path.dirname(__file__), 'bower_components'),\n    os.path.join(os.path.dirname(__file__), 'css'),\n]\n\n#Yay!\nenv.register(\n    'js_main',\n    assets.Bundle(\n        'jquery/dist/jquery.min.js',\n        'bootstrap/dist/js/bootstrap.js',\n        assets.Bundle(\n            'all.coffee',\n            filters=['coffeescript']\n        ),\n        output='js_all.js'\n    )\n)\n\nenv.register(\n    'bstrap',\n      assets.Bundle(\n          'adminfix.css',\n          'bootstrap/dist/css/bootstrap.css',\n          'bootstrap/dist/css/bootstrap-theme.css',\n          output='bstrap.css'\n      ),\n)\n\nenv.register(\n    'css_main',\n    assets.Bundle(\n      'bootstrap/dist/css/bootstrap.css',\n      'bootstrap/dist/css/bootstrap-theme.css',\n      assets.Bundle(\n          'all.scss',\n          filters='scss',\n      ),\n      output='all_css.css'\n    )\n)\n\nenv.register(\n    'css_login',\n      assets.Bundle(\n          'login.css',\n          output='login_css.css'\n      ),\n)\n\n\n\n\nconfig = configparser.ConfigParser()\n\ndbpath = \"\"\n\nif not os.path.isfile('config.ini'):\n  dbpath = \"none\"\nelse:\n  config.read('config.ini')\n  dbpath = config['main']['db']\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+dbpath\n\ndb = SQLAlchemy(app)\n\n#The general structure is going to be simple, the database contains a bunch of\n#'pages', we show those pages when the correct URL is entered.\n\n#also, there will be a simple admin interface for updating content.\n\n#YAY! 
let's see if I can finish something for once.\n\n@app.route('/')\ndef index():\n  #Changed this all up, using scrolling coolpages now...\n  markdowner = markdown2.Markdown()\n\n  main = Page.query.filter_by(title=\"main\").first()\n  if not main:\n    content_main = \"Please create a page called main to edit this page\"\n  else:\n    content_main = markdowner.convert(main.content)\n\n  location = Page.query.filter_by(title=\"location\").first()\n  if not location:\n    content_location = \"Please create a page called location to edit this page\"\n  else:\n    content_location = markdowner.convert(location.content)\n\n  funkym = Page.query.filter_by(title=\"funkymonkeys\").first()\n  if not funkym:\n    content_funkym = \"Please create a page called funkymonkeys to edit this page\"\n  else:\n    content_funkym = markdowner.convert(funkym.content)\n\n  excite = Page.query.filter_by(title=\"excite\").first()\n  if not excite:\n    content_excite = \"Please create a page called excite to edit this page\"\n  else:\n    content_excite = markdowner.convert(excite.content)\n\n  tnt = Page.query.filter_by(title=\"tnt\").first()\n  if not tnt:\n    content_tnt = \"Please create a page called tnt to edit this page\"\n  else:\n    content_tnt = markdowner.convert(tnt.content)\n\n\n  return render_template('mission.html',  title=\"Tuggerah Family Festival\", main=content_main, location=content_location, excite=content_excite, funkym=content_funkym, tnt=content_tnt)\n\n@app.route('/admin/newpage', methods=['GET', 'POST'])\n@login_required(role=\"admin\")\ndef newpage():\n\n  if request.method == \"POST\":\n    title = sanitise(request.form['Title'].lower())\n    slug = sanitise(request.form['Title'].lower()).replace(\" \", \"-\")\n    content = request.form['Content']\n    newpage = Page(title, slug, content)\n    db.session.add(newpage)\n    db.session.commit()\n    return redirect(url_for('index'))\n  else:\n    new_page_name = \"\"\n    if request.args.get('page'):\n      new_page_name = sanitise(request.args.get('page'))\n\n    return render_template('newpage.html',  title=\"New Page\", pagename=new_page_name)\n\n\n@app.route('/admin/newuser', methods=['GET', 'POST'])\n@login_required(role=\"admin\")\ndef newuser():\n\n  if request.method == \"GET\":\n    return render_template('newuser.html',  title=\"Add New User\")\n  elif request.method == \"POST\":\n    username = sanitise(request.form[\"UserName\"])\n    email = sanitise(request.form[\"Email\"])\n    password = sanitise(request.form[\"Pass\"])\n    role = \"user\"\n    if 'is_admin' in request.form:\n      role = \"admin\"\n    user = User(username, email, hash_pass(password), role)\n    db.session.add(user)\n    db.session.commit()\n    return render_template('newuser.html',  title=\"Add New User\")\n  else:\n    abort(500)\n\n\n@app.route('/admin/listpages')\n@login_required(role=\"admin\")\ndef listpages():\n\n  pages = db.session.query(Page).all()\n  return render_template('listpages.html',  title=\"Page List\", pagelist=pages)\n\n\n@app.route('/admin/deletepage/<pageid>')\n@login_required(role=\"admin\")\ndef deletepage(pageid):\n\n  page = Page.query.filter_by(id=pageid).first()\n  db.session.delete(page)\n  db.session.commit()\n  return redirect(url_for('listpages'))\n\n#edit a page\n@app.route('/admin/editpage/<pageid>', methods=['GET', 'POST'])\n@login_required(role=\"admin\")\ndef editpage(pageid):\n\n  if request.method == \"POST\":\n    page = Page.query.filter_by(id=pageid).first()\n    page.content = request.form[\"Content\"]\n    
db.session.commit()\n    return redirect(url_for('listpages'))\n  elif request.method == \"GET\":\n    page = Page.query.filter_by(id=pageid).first()\n    return render_template('editpage.html', title=\"Edit Page \"+page.title.title(), pageid=pageid, pgtitle=page.title.title(), content=page.content)\n  else:\n    abort(500)\n\n\n\n#edit the nav menu\n@app.route('/admin/editnav', methods=['GET', 'POST'])\n@login_required(role=\"admin\")\ndef editnav():\n\n  if request.method == \"POST\":\n    navfile = open(\"templates/navitems.html\", 'w')\n    pagelist = request.form.getlist(\"pages\")\n    for pageid in pagelist:\n      page = Page.query.filter_by(id=pageid).first()\n      navfile.write(\"
<li><a href=\\\"/pages/\"+page.slug+\"\\\">\"+page.title.title()+\"</a></li>\\n\")\n    navfile.close()\n    return redirect(url_for('index'))\n  #return redirect(url_for('index'))\n  elif request.method == \"GET\":\n    pages = db.session.query(Page).all()\n    return render_template('editnav.html',  title=\"Edit Nav Menu\", pagelist=pages)\n  else:\n    abort(500)\n\n\n\n###Deprecated with separate setup.py account.\n#Set up the application, can potentially be deleted.\n#@app.route('/setup', methods=['GET', 'POST'])\n#def setup():\n#  #Need to stick a check in to return a 404 if this website is already set up.\n#  if os.path.isfile(dbname+\".db\"):\n#    abort(404)\n#  else:\n#    if request.method == \"GET\":\n#      return render_template('setup.html',  title=\"Setup\")\n#    elif request.method == \"POST\":\n#      db.create_all()\n#      username = sanitise(request.form[\"UserName\"])\n#      email = sanitise(request.form[\"Email\"])\n#      password = sanitise(request.form[\"Pass\"])\n#      admin = User(username, email, hash_pass(password), \"admin\")\n#      secretkey = urandom(90)\n#      secretconf = Config('secretkey', secretkey)\n#      db.session.add(admin)\n#      db.session.add(secretconf)\n#      db.session.commit()\n#      return \"Done!\"\n#    else:\n#      abort(500)\n\n#Log the user in.\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n  #This will either display the login form to the users (GET method) or log in the user through args passed by the login\n  #form (POST)\n  if request.method == \"POST\":\n    username = sanitise(request.form['UserName'])\n    password = hash_pass(request.form['Pass'])\n    user = User.query.filter_by(username=username,password=password).first()\n    if user is None:\n      flash('Username or Password is invalid' , 'error')\n      return redirect(url_for('login'))\n    session['username'] = user.username\n    login_user(user)\n    return redirect(url_for('index'))\n\n  elif request.method == \"GET\":\n    return render_template('login.html', title=\"Login\")\n\n@app.route('/scrollr')\ndef scrollr_test():\n  #Remember to remove this!\n  return render_template('scrollr.html')\n\n#Log the user out.\n@app.route('/logout')\ndef logout():\n  logout_user()\n  return render_template('message.html', title=\"Logged Out\", message=\"You have been logged out.\")\n\n#The guts of displaying content\n@app.route('/pages/<slug>')\ndef pages(slug):\n  page = Page.query.filter_by(slug=slug).first()\n  markdowner = markdown2.Markdown()\n  contents = markdowner.convert(page.content)\n  return render_template('page.html', title=page.title.title(), fill=contents)\n\n##########################Error Pages#############################\n@app.errorhandler(404)\ndef error_404(error):\n  errormsg = \"\"\"\n
    It appears the page you tried to access does not exist.\n Please use the Navigation bar to find the page you are after.\n
    \n \"\"\"\n\n return render_template('error.html', title=\"404 Error\", errortitle=\"404 - Page Not Found\", errortext=errormsg)\n\n@app.errorhandler(500)\ndef error_500(error):\n errormsg = \"\"\"\n
    Something has broken... uh, I'm sure we'll get it fixed.\n This error has been logged, so we should be on top of it.\n
    \n \"\"\"\n app.logger.error('An error occurred ' + str(error) + \" Request: \" + str(request))\n return render_template('error.html', title=\"500 Error\", errortitle=\"500 - Internal Server Error\", errortext=errormsg)\n\n@app.errorhandler(401)\ndef error_401(error):\n errormsg = \"\"\"\n
    You tried to do something that only logged in users can do.\n Please log in or don't try that again.\n
    \n  \"\"\"\n  app.logger.warning('Someone attempted to access something they do not have access to. Source: ' + request.remote_addr + \" \" + str(error) + \" Request: \" + str(request))\n  return render_template('error.html', title=\"401 Error\", errortitle=\"401 - Unauthorised\", errortext=errormsg)\n\n###############Some other functions###################\n@app.before_request\ndef before_req():\n  if os.path.isfile(dbname+\".db\"):\n    g.pagecount = db.session.query(Page).count()\n  else:\n    g.pagecount = 0\n\n@app.before_request\ndef before_request():\n  g.user = current_user\n\n@lm.user_loader\ndef load_user(id):\n  return User.query.get(int(id))\n\n####DB Classes####\n\nclass User(db.Model):\n  id = db.Column(db.Integer, primary_key = True)\n  username = db.Column(db.String(64), unique = True)\n  email = db.Column(db.String(120), unique = True)\n  role = db.Column(db.SmallInteger, default = \"user\")\n  password = db.Column(db.String(120))\n\n  def is_authenticated(self):\n    return True\n\n  def is_active(self):\n    return True\n\n  def is_anonymous(self):\n    return False\n\n  def get_id(self):\n    return str(self.id).encode('utf-8')\n\n  def __repr__(self):\n    return '<User %r>' % (self.username)\n\nclass Page(db.Model):\n  id = db.Column(db.Integer, primary_key=True)\n  title = db.Column(db.String(120), unique=True)\n  slug = db.Column(db.String(120), unique=True)\n  content = db.Column(db.Text)\n  def __init__(self, title, slug, content):\n    self.title = title\n    self.slug = slug\n    self.content = content\n\n  def get_id(self):\n    return self.id\n\n  def __repr__(self):\n    return '<Page %r>' % self.title\n\nclass Config(db.Model):\n  id = db.Column(db.Integer, primary_key=True)\n  configitem = db.Column(db.String(120), unique=True)\n  value = db.Column(db.String(120))\n\n  def __init__(self,configitem,value):\n    self.configitem = configitem\n    self.value = value\n\n  def __repr__(self):\n    return '<Config %r>' % self.configitem\n\n\n\n##########Misc Helper Functions##############\ndef hash_pass(PassWord):\n  password_encrypted = hashlib.sha256(PassWord.encode())\n  return password_encrypted.hexdigest()\n\ndef authenticate_user(password_submitted, user):\n\n  if user.password == hash_pass(password_submitted):\n    return True\n  else:\n    return False\n\n#Checks if a user has the correct role to perform an action.\n#The input argument 'role' should have a list of the roles that can perform\n#an action, ie, ['writer', 'admin'] and so on.\ndef check_role(roles):\n  if 'user_id' not in session:\n    abort(401)\n  user = User.query.filter_by(id=session['user_id']).first()\n  if not user:\n    abort(401)\n  has_role = False\n  for role in roles:\n    if role == user.role:\n      has_role = True\n  return has_role\n\n\n#Cleans up user input by converting unicode to HTML entities.\ndef sanitise(text):\n  return escape(text, quote=True)\n\ndef escape(s, quote=None):\n  '''Replace special characters \"&\", \"<\" and \">\" to HTML-safe sequences.\n  If the optional flag quote is true, the quotation mark character (\")\n  is also translated.'''\n  s = s.replace(\"&\", \"&amp;\") # Must be done first!\n  s = s.replace(\"<\", \"&lt;\")\n  s = s.replace(\">\", \"&gt;\")\n  if quote:\n    s = s.replace('\"', \"&quot;\")\n  return s\n\n\n\n#this little bit of magic sets the session key from the DB, and sets a default otherwise.\nif os.path.isfile(dbname+\".db\"):\n  key = Config.query.filter_by(configitem=\"secretkey\").first()\n  app.secret_key = key.value\nelse:\n  app.secret_key = 'thiswillchange'\n\nif __name__ == '__main__':\n  formatter = logging.Formatter(\"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\")\n  
handler = RotatingFileHandler('logs/mission.log', maxBytes=10000, backupCount=1)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n app.logger.addHandler(handler)\n app.run(debug=True)\n", "sub_path": "mission.py", "file_name": "mission.py", "file_ext": "py", "file_size_in_byte": 14736, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.ext.login.current_user.is_authenticated", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.role", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user", "line_number": 38, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.ext.login.LoginManager", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.ext.assets.Environment", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 54, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 57, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 59, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 60, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 102, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 115, "usage_type": 
"attribute"}, {"api_name": "os.path", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.ext.sqlalchemy.SQLAlchemy", "line_number": 123, "usage_type": "call"}, {"api_name": "markdown2.Markdown", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 174, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 174, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 175, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 175, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 184, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 185, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 194, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 194, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 196, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 197, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 197, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 198, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 198, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 199, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 201, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 201, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 206, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 216, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 212, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 226, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 226, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 220, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 233, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 
233, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 237, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 237, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 238, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 230, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 251, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 251, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 253, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 253, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 260, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 260, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 264, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 298, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 298, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 299, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 299, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 300, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 300, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 304, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 304, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 305, "usage_type": "name"}, {"api_name": "flask.ext.login.login_user", "line_number": 306, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 309, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 309, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 310, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 315, "usage_type": "call"}, {"api_name": "flask.ext.login.logout_user", "line_number": 320, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 321, "usage_type": "call"}, {"api_name": "markdown2.Markdown", "line_number": 327, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 329, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 340, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 349, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 350, "usage_type": "call"}, {"api_name": "flask.request.remote_addr", "line_number": 359, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 359, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 365, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 365, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 365, "usage_type": "name"}, {"api_name": "flask.g.pagecount", "line_number": 366, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 366, "usage_type": "name"}, {"api_name": "flask.g.pagecount", "line_number": 368, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 368, "usage_type": "name"}, {"api_name": "flask.g.user", "line_number": 372, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 372, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user", "line_number": 372, "usage_type": "name"}, {"api_name": "hashlib.sha256", "line_number": 434, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 448, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 449, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 450, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 478, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 478, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 478, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 485, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 486, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 487, "usage_type": "attribute"}]} +{"seq_id": "23418057", "text": "import pygame\nfrom pygame.locals import *\nimport lib.perso\nimport lib.img\nimport lib.ennemi\nfrom random import*\nimage = lib.img.image()\n\n#classes de point d'apparition des ennemis\nclass spawn(pygame.sprite.Sprite):\n\n def __init__(self, posx, posy):\n \n super().__init__()\n\n self.image= pygame.image.load(image.spawn).convert_alpha()\n self.rect= self.image.get_rect()\n self.vie=2\n self.rect.x=posx\n self.rect.y=posy\n \n def update(self, Sprite_en, event):\n\n if event.spawn==True:\n en=lib.ennemi.ennemi(self.rect.x,self.rect.y)\n Sprite_en.add(en)\n\n if event.spawnattack==True:\n self.vie-=1\n event.spawnattack=False\n \n if self.vie==0:\n self.kill()\n\n \n", "sub_path": "lib/spawn.py", "file_name": "spawn.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "lib.perso.img.image", "line_number": 7, "usage_type": "call"}, {"api_name": "lib.perso.img", "line_number": 7, "usage_type": "attribute"}, {"api_name": "lib.perso", "line_number": 7, "usage_type": "name"}, {"api_name": "pygame.sprite", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "lib.perso.ennemi.ennemi", "line_number": 25, "usage_type": "call"}, {"api_name": "lib.perso.ennemi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "lib.perso", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "480181891", "text": "# Copyright 2019 DeepMind Technologies Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tabular Q-Learner example on Tic Tac Toe.\n\nTwo Q-Learning agents are trained by playing against each other. Then, the game\ncan be played against the agents from the command line.\n\nAfter about 10**5 training episodes, the agents reach a good policy: win rate\nagainst random opponents is around 99% for player 0 and 92% for player 1.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport sys\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nfrom six.moves import *\n\nimport pyspiel\n\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.algorithms import random_agent\nfrom open_spiel.python.algorithms import tabular_qlearner\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"num_episodes\", int(1e5), \"Number of train episodes.\")\n\n\ndef eval_two_agents(env, agent0, agent1, num_episodes, game_number):\n  \"\"\"Evaluates `trained_agents` against `random_agents` for `num_episodes`.\"\"\"\n  state_count = np.zeros(4)\n  state_count_rps = np.zeros(9)\n  reward_count = 0\n  for _ in range(num_episodes):\n    time_step = env.reset()\n    while not time_step.last():\n      action0 = agent0.step(time_step, is_evaluation=True).action\n      action1 = agent1.step(time_step, is_evaluation=True).action\n      time_step = env.step([action0, action1])\n    # prisoner's dilemma\n    if game_number == 1:\n      for i, state in enumerate([[3, 3], [0, 5], [5, 0], [1, 1]]):\n        if state == time_step.rewards:\n          state_count[i] += 1\n          reward_count += state[0]\n    # matching pennies\n    elif game_number == 2:\n      reward = time_step.rewards\n      if reward == [1, -1]:\n        # Top left\n        if action0 == 0:\n          state_count[0] += 1\n        # Bottom right\n        else:\n          state_count[3] += 1\n      elif reward == [-1, 1]:\n        # Top right\n        if action0 == 0:\n          state_count[1] += 1\n        # Bottom left\n        else:\n          state_count[2] += 1\n      reward_count += reward[0]\n    # battle of the sexes\n    elif game_number == 3:\n      for i, state in enumerate([[2, 1], [0, 0], [0, 0], [1, 2]]):\n        if state == time_step.rewards:\n          # the `if` avoids counting during wrong combinations (prevents double counting).\n          state_count[i] += 1 if not (i == 1 and action0 == 1 or i == 2 and action0 == 0) else 0\n          reward_count += state[0] if not (i == 1 and action0 == 1 or i == 2 and action0 == 0) else 0\n    # rock, paper, scissors\n    elif game_number == 4:\n      for i, state in enumerate([[0, 0], [-0.25, 0.25], [0.5, -0.5], [0.25, -0.25], [0, 0], [-0.05, 0.05], [-0.5, 0.5], [0.05, -0.05], [0, 0]]):\n        if state == time_step.rewards:\n          state_count_rps[i] += 1 if not (i == 0 and (action0 == 1 or action0 == 2)\n                                          or i == 4 and (action0 == 0 or action0 == 2)\n                                          or i == 8 and (action0 == 0 or action0 == 1)) else 0\n          reward_count += state[0] if not (i == 0 and (action0 == 1 or action0 == 2)\n                                           or i == 4 and (action0 == 0 or action0 == 2)\n                                           or i == 8 and (action0 == 0 or action0 == 1)) else 0\n  logging.info(\"Average utility for player 0: \" + 
str(reward_count/num_episodes))\n if game_number != 4:\n return state_count / num_episodes\n else:\n return state_count_rps / num_episodes\n\n\ndef main(_):\n game1 = pyspiel.create_matrix_game(\"prisoners_dilemma\", \"Prisoner's Dilemma\",\n [\"C\", \"B\"], [\"C\", \"B\"],\n [[3, 0], [5, 1]], [[3, 5], [0, 1]])\n game2 = pyspiel.create_matrix_game(\"matching_pennies\", \"Matching Pennies\",\n [\"H\", \"T\"], [\"H\", \"T\"],\n [[1, -1], [-1, 1]], [[-1, 1], [1, -1]])\n game3 = pyspiel.create_matrix_game(\"battle_of_the_sexes\", \"Battle of the Sexes\",\n [\"B\", \"S\"], [\"B\", \"S\"],\n [[2, 0], [0, 1]], [[1, 0], [0, 2]])\n game4 = pyspiel.create_matrix_game(\"biased_rock_paper_scissors\", \"Biased Rock, Paper, Scissors\",\n [\"R\", \"P\", \"S\"], [\"R\", \"P\", \"S\"],\n [[0, -0.25, 0.5], [0.25, 0, -0.05], [-0.5, 0.05, 0]], [[0, 0.25, -0.5],[-0.25, 0, 0.05], [0.5, -0.05, 0]])\n\n game_number = input(\"Choose a game\\n - Prisoner's Dilemma: 1 \\n - Matching Pennies: 2 \\n - Battle of the Sexes: 3 \\n - Biased Rock, Paper, Scissors: 4 \\n\\n Game number is \")\n game = locals()[\"game{}\".format(game_number)]\n\n env = rl_environment.Environment(game)\n num_actions = env.action_spec()[\"num_actions\"]\n num_players = 2\n\n agents1 = [\n tabular_qlearner.QLearner(player_id=idx, num_actions=num_actions)\n for idx in range(num_players)\n ]\n\n agent2 = []\n\n agents = agents1\n\n # random agents for evaluation\n random_agents = [\n random_agent.RandomAgent(player_id=idx, num_actions=num_actions)\n for idx in range(num_players)\n ]\n\n # 1. Train the agents\n training_episodes = FLAGS.num_episodes\n for cur_episode in range(training_episodes):\n if cur_episode % int(1e4) == 0:\n state_rate_self_play = eval_two_agents(env, agents[0], agents[1], 10000, int(game_number))\n state_rate_random = eval_two_agents(env, agents[0], random_agents[1], 10000, int(game_number))\n logging.info(\"Starting episode %s, state_rate_self_play %s, state_rate_random %s\", cur_episode, state_rate_self_play, state_rate_random)\n time_step = env.reset()\n # print(time_step)\n while not time_step.last():\n step_output0 = agents[0].step(time_step)\n step_output1 = agents[1].step(time_step)\n action0 = step_output0.action\n action1 = step_output1.action\n time_step = env.step([action0, action1])\n # Episode is over, step all agents with final info state.\n for agent in agents:\n agent.step(time_step)\n\nif __name__ == \"__main__\":\n app.run(main)\n", "sub_path": "open_spiel/python/project/part_1/q_learning_tabular/simultaneous_game.py", "file_name": "simultaneous_game.py", "file_ext": "py", "file_size_in_byte": 7123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "absl.flags.FLAGS", "line_number": 41, "usage_type": "attribute"}, {"api_name": "absl.flags", "line_number": 41, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 43, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "pyspiel.create_matrix_game", "line_number": 106, "usage_type": "call"}, {"api_name": "pyspiel.create_matrix_game", "line_number": 109, "usage_type": "call"}, {"api_name": "pyspiel.create_matrix_game", "line_number": 112, "usage_type": "call"}, {"api_name": "pyspiel.create_matrix_game", 
"line_number": 115, "usage_type": "call"}, {"api_name": "open_spiel.python.rl_environment.Environment", "line_number": 122, "usage_type": "call"}, {"api_name": "open_spiel.python.rl_environment", "line_number": 122, "usage_type": "name"}, {"api_name": "open_spiel.python.algorithms.tabular_qlearner.QLearner", "line_number": 127, "usage_type": "call"}, {"api_name": "open_spiel.python.algorithms.tabular_qlearner", "line_number": 127, "usage_type": "name"}, {"api_name": "open_spiel.python.algorithms.random_agent.RandomAgent", "line_number": 137, "usage_type": "call"}, {"api_name": "open_spiel.python.algorithms.random_agent", "line_number": 137, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 147, "usage_type": "call"}, {"api_name": "absl.app.run", "line_number": 161, "usage_type": "call"}, {"api_name": "absl.app", "line_number": 161, "usage_type": "name"}]} +{"seq_id": "145048826", "text": "#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport subprocess\nfrom mpl_toolkits.mplot3d import Axes3D\n\nNMODEL = 36\nPINTVL = 10\nANIMAX = 300\n\nf0 = 1.032e-4\ng = 9.81\n\ndef main():\n mkdirs()\n nad, timd = read_file(\"evol_field.dat\", 0.0)\n plot_time(nad, timd)\n # plot_anime(nad)\n # plot_3d_trajectory(nad[:, 21], nad[:, 29], nad[:, 0])\n\ndef mkdirs():\n subprocess.run(\"rm -rf img\", check=True, shell=True)\n subprocess.run(\"mkdir -p img\", check=True, shell=True)\n subprocess.run(\"mkdir -p img/a_gph\", check=True, shell=True)\n subprocess.run(\"mkdir -p img/a_t\", check=True, shell=True)\n subprocess.run(\"mkdir -p img/o_psi\", check=True, shell=True)\n subprocess.run(\"mkdir -p img/o_t\", check=True, shell=True)\n\ndef read_file(file, discard):\n # return np.ndarray[time, NMODEL]\n with open(file, \"r\") as f:\n ar = f.read().split()\n ar2 = []\n n = len(ar)\n na = np.empty((n // (NMODEL + 1), NMODEL))\n tim = np.empty((n // (NMODEL + 1)))\n for i in range(n // (NMODEL + 1)):\n tim[i] = ar[i * (NMODEL + 1)]\n na[i, :] = ar[i * (NMODEL + 1) + 1:(i + 1) * (NMODEL + 1)]\n nt = na.shape[0]\n nad = na[int(nt * discard):nt, :]\n timd = tim[int(nt * discard):nt]\n return nad, timd\n\ndef plot_time(nad, timd):\n for i in range(NMODEL):\n plt.plot(timd * 3.07e-4, nad[:, i])\n plt.xlabel(\"model year\")\n plt.savefig(\"img/x_%02d.png\" % i)\n plt.close()\n\ndef plot_snap(cmaxs, nad, i):\n it = i // PINTVL\n psia, ta, psio, to = reconstruct_grid(nad[i], 20, 20)\n datas = {\"a_gph\": psia * f0 / g, \"a_t\": ta, \"o_psi\": psio, \"o_t\": to}\n for cmp in cmaxs:\n title = \"%s %04d\" % (cmp, it)\n plot_matrix(datas[cmp], \"img/%s/%s_%04d.png\" % (cmp, cmp, it), title, cmaxs[cmp], ipol=\"none\")\n\ndef plot_anime(nad):\n # cmaxs = {\"a_gph\": 500, \"a_t\": 20, \"o_psi\": 5e+5, \"o_t\": 40} # DDV2016\n cmaxs = {\"a_gph\": 500, \"a_t\": 20, \"o_psi\": 3e+4, \"o_t\": 40} # VL2016\n nt = nad.shape[0]\n for i in range(nt - PINTVL * ANIMAX, nt, PINTVL):\n plot_snap(cmaxs, nad, i)\n for dir in cmaxs:\n subprocess.run(\"convert -delay 8 -loop 0 ./img/%s/*.png ./img/%s/anime.gif\" % (dir, dir), check=True, shell=True)\n\ndef plot_3d_trajectory(x, y, z):\n # 3D trajectory\n # plt.rcParams[\"font.size\"] = 16\n fig = plt.figure()\n fig.subplots_adjust(left=0.02, bottom=0.02, right=0.98, top=0.98, wspace=0.04, hspace=0.04)\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(x, y, z, label=\"traj\", marker=\".\")\n # ax.set_xlim([-8e-4, 8e-4])\n # ax.set_ylim([-0.05, 0.2])\n # ax.set_zlim([0.02, 0.06])\n 
ax.set_xlabel(\"Psi o 2\")\n ax.set_ylabel(\"Theta o 2\")\n ax.set_zlabel(\"Psi a 1\")\n plt.savefig(\"./img/traj.png\")\n plt.close()\n\ndef plot_matrix(mat, out, title=\"\", cmax=None, ipol=\"none\"):\n # plt.rcParams[\"font.size\"] = 14\n fig, ax = plt.subplots(1)\n if cmax is None:\n cmax = np.max(np.abs(mat))\n cm = ax.imshow(mat, cmap=plt.cm.RdBu_r, aspect=0.7, interpolation=ipol)\n cm.set_clim(-1.0 * cmax, cmax)\n x0, x1 = ax.get_xlim()\n y0, y1 = ax.get_ylim()\n plt.colorbar(cm)\n plt.title(title)\n plt.savefig(out)\n plt.close()\n\ndef reconstruct_grid(waves, nx, ny):\n def fa(p):\n # return np.ndarray[ny, nx]\n return np.sqrt(2) * np.cos(p * y_grid)\n\n def fk(m, p):\n # return np.ndarray[ny, nx]\n return 2.0 * np.cos(m * n * x_grid) * np.sin(p * y_grid)\n\n def fl(h, p):\n # return np.ndarray[ny, nx]\n return 2.0 * np.sin(h * n * x_grid) * np.sin(p * y_grid)\n\n def phio(ho, po):\n # return np.ndarray[ny, nx]\n return 2.0 * np.sin(0.5 * ho * n * x_grid) * np.sin(po * y_grid)\n\n def atm(is_t):\n # return np.ndarray[ny, nx]\n gridval = 0.0\n for j in range(na):\n j_all = j + na if is_t else j\n if types[j] == \"A\":\n gridval = gridval + waves[j_all] * fa(ps[j])\n elif types[j] == \"K\":\n gridval = gridval + waves[j_all] * fk(hs[j], ps[j])\n else:\n gridval = gridval + waves[j_all] * fl(hs[j], ps[j])\n if is_t:\n # gridval *= (2.0 * f0 / R)\n gridval *= (f0 ** 2 * L ** 2) / R\n else:\n gridval *= L ** 2 * f0\n return gridval\n\n def ocn(is_t):\n # return np.ndarray[ny, nx]\n gridval = 0.0\n for j in range(no):\n j_all = j + (na * 2 + no) if is_t else j + na * 2\n gridval = gridval + waves[j_all] * phio(hos[j], pos[j])\n if is_t:\n gridval *= (f0 ** 2 * L ** 2) / R\n else:\n gridval -= np.mean(gridval)\n gridval *= L ** 2 * f0\n return gridval\n\n n = 1.5\n x_grid = np.empty((ny, nx))\n x_grid[:, :] = np.linspace(0, 2.0 * np.pi / n, nx)[np.newaxis, :]\n y_grid = np.empty((ny, nx))\n y_grid[:, :] = np.linspace(np.pi, 0, ny)[:, np.newaxis]\n\n na = 10\n no = 8\n R = 287\n L = 5000000 / np.pi\n types = [\"A\", \"K\", \"L\", \"A\", \"K\", \"L\", \"K\", \"L\", \"K\", \"L\"]\n hs = [0, 1, 1, 0, 1, 1, 2, 2, 2, 2]\n ps = [1, 1, 1, 2, 2, 2, 1, 1, 2, 2]\n hos = [1, 1, 1, 1, 2, 2, 2, 2]\n pos = [1, 2, 3, 4, 1, 2, 3, 4]\n\n return atm(False), atm(True), ocn(False), ocn(True)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "python/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 5350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "matplotlib.use", "line_number": 5, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 25, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 26, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 28, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 29, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.savefig", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 93, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 159, "usage_type": "attribute"}]} +{"seq_id": "338383203", "text": "from copy import deepcopy\nfrom collections import defaultdict\n\nfrom formlang.contextfree import Terminal\n\n\ndef path_query_hellings(grammar, graph):\n grammar.normalize(weak=True)\n n = graph.number_of_nodes()\n dp = list()\n lhs = defaultdict(list)\n\n eps = grammar.get_epsilon_producers()\n\n for i in range(n):\n for el in eps:\n dp.append((el, i, i))\n\n for u, v, symbol in graph.edges(data=\"symbol\"):\n 
# seed the triple set: for each edge (u, v) labeled symbol, add (lhs, u, v) for every unit production lhs -> symbol\n for prod in grammar.productions:\n if prod.rhs == [Terminal(symbol)]:\n dp.append((prod.lhs, u, v))\n\n for prod in grammar.productions:\n if len(prod.rhs) != 2:\n continue\n lhs[tuple(prod.rhs)].append(prod.lhs)\n\n rem = deepcopy(dp)\n\n while rem:\n n1, u, v = rem.pop(0)\n\n for n2, w, _u in dp:\n if _u != u:\n continue\n for n3 in lhs[n2, n1]:\n new = (n3, w, v)\n if new in dp:\n continue\n dp.append(new)\n rem.append(new)\n\n for n2, _v, w in dp:\n if _v != v:\n continue\n for n3 in lhs[n1, n2]:\n new = (n3, u, w)\n if new in dp:\n continue\n dp.append(new)\n rem.append(new)\n\n return sorted([(u, v) for n, u, v in dp if n == grammar.start])\n", "sub_path": "formlang/algo/hellings.py", "file_name": "hellings.py", "file_ext": "py", "file_size_in_byte": 1379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.defaultdict", "line_number": 11, "usage_type": "call"}, {"api_name": "formlang.contextfree.Terminal", "line_number": 21, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "320834318", "text": "#!/usr/bin/env python\nimport logging\nimport datetime\nimport string\nimport os.path\nimport glob\nimport sys\nimport getpass\nfrom logging.handlers import RotatingFileHandler\nfrom re import compile\nfrom bisect import bisect_left, insort_left\nfrom random import randint, SystemRandom\nfrom collections import deque, OrderedDict\nfrom subprocess import call\n# from time import sleep\n\n# installed dependencies\nimport requests\nimport pysftp\nfrom profanityfilter import ProfanityFilter\nfrom ldap3 import Server, Connection, NONE, SUBTREE\n# from selenium import webdriver\n# from selenium.common.exceptions import TimeoutException\n# from selenium.webdriver.support.ui import WebDriverWait\n# from selenium.webdriver.support import expected_conditions as EC\n# from selenium.webdriver.common.by import By\n\n__author__ = 'Jordan Page'\n__license__ = 'MIT'\n__version__ = '1.0.1'\n__email__ = 'jpage628@gmail.com'\n__date__ = '8/21/2019'\n__status__ = 'Development'\n\n\ndef create_student(information_list, grade_level_list):\n while True:\n logging.info(\"Enter the student's First name: \")\n first_name = input().strip()\n while check_name(first_name) is False:\n logging.info(\"A name must be alphabetical, or contain hyphens, spaces, or apostrophes.\")\n logging.info(\"Enter the student's First name: \")\n first_name = input().strip()\n\n logging.info(\"Enter the student's Last name: \")\n last_name = input().strip()\n while check_name(last_name) is False:\n logging.info(\"A name must be alphabetical, or contain hyphens, spaces, or apostrophes.\")\n logging.info(\"Enter the student's Last name: \")\n last_name = input().strip()\n\n while True:\n try:\n logging.info(\"Enter the student's Grade level (0-12): \")\n grade_level = int(input())\n except ValueError:\n logging.info(\"The grade level must be a number (0-12).\")\n continue\n break\n\n while grade_level not in range(0, 13):\n logging.info(\"The grade level must be between 0 and 12.\")\n logging.info(\"Enter the student's Grade level (0-12): \")\n\n try:\n grade_level = int(input())\n except ValueError:\n logging.info(\"The grade level must be a number (0-12).\")\n continue\n\n graduation_year = (datetime.date.today().year + (12 - grade_level))\n first_name_split = split_name(first_name)\n last_name_split = split_name(last_name)\n grade_level_list.append(grade_level)\n\n username_list = 
sorted(usernames_from_sftp()[0])\n\n if len(last_name_split) >= 5:\n candidate = str(graduation_year)[2:] + last_name_split[:5] + first_name_split[0]\n while check_name_in_ldap(candidate) is False:\n candidate = \\\n resolve_username(candidate, username_list, first_name_split, last_name_split[:5], graduation_year,\n 'student')\n else:\n candidate = str(graduation_year)[2:] + last_name_split + first_name_split[0]\n while check_name_in_ldap(candidate) is False:\n candidate = \\\n resolve_username(candidate, username_list, first_name_split, last_name_split, graduation_year,\n 'student')\n\n logging.info('\\nCandidate username is: ' + candidate)\n\n word_list = make_word_file()\n\n secure_random = SystemRandom()\n first_word = secure_random.choice(word_list).strip()\n second_word = secure_random.choice(word_list).strip()\n\n # check if the password is either too long or contains profanity\n pf = ProfanityFilter()\n while len(first_word + second_word) > 10 or len(first_word + second_word) < 6 or \\\n \"'\" in first_word or \"'\" in second_word or pf.is_profane(first_word) or pf.is_profane(second_word):\n first_word = secure_random.choice(word_list).strip()\n second_word = secure_random.choice(word_list).strip()\n\n pwd = (first_word + second_word + str(randint(0, 9)) + str(randint(0, 9)))\n\n full_name = first_name.title() + ' ' + last_name.title()\n email = candidate + \"@snakeriver.org\"\n logging.info(\"\\nInformation:\")\n information =\\\n pwd + ',' + candidate + ',' + full_name + ',' + last_name.title() + ',' + first_name.title() + ',' + email\n logging.info(information)\n\n logging.info(\"\\nDo you want to keep creating student accounts?(y/n): \")\n user_prompt = input().lower()\n while not ((user_prompt == 'y') or (user_prompt == 'yes') or\n (user_prompt == 'n') or (user_prompt == 'no')):\n logging.info(\"You must enter y, yes, n, or no.\")\n logging.info(\"Do you want to keep creating student accounts?(y/n): \")\n user_prompt = input().lower()\n if (user_prompt == 'y') or (user_prompt == 'yes'):\n information_list.append(information)\n continue\n elif (user_prompt == 'n') or (user_prompt == 'no'):\n information_list.append(information)\n return information_list, grade_level_list\n\n\n# Simply checks a given name to see if it only contains\n# alphabetical characters, hyphens, spaces, or apostrophes\ndef check_name(name):\n allowed_chars = string.ascii_letters + \"'- \"\n if name == '':\n return False\n\n for letter in name:\n if letter in allowed_chars:\n continue\n else:\n return False\n\n return True\n\n\n# Truncate the given first or last name at the first whitespace\ndef split_name(name):\n return name.split(None, 1)[0]\n\n\ndef usernames_from_sftp():\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n # *** ENTER FTP USERNAME AND PASSWORD HERE\n srv = pysftp.Connection(host='10.110.204.14', username='_____', password='_____', port=22, cnopts=cnopts)\n srv.get('/Steve/student.csv', preserve_mtime=True)\n\n srv.close()\n ps_user_list_path = os.path.join(os.getcwd(), 'student.csv')\n ps_user_list = dict()\n needs_username_list = []\n wrong_web_id = dict()\n pattern = compile(r'^\\d\\d[a-zA-Z]{2,6}')\n\n with open(ps_user_list_path, mode='r', encoding='utf-8') as f:\n for line in f.readlines():\n curr_username = str(line.split(',')[2])\n first_name = str(line.split(',')[0]).title()\n last_name = str(line.split(',')[1]).title()\n curr_grade = str(line.split(',')[3]).strip()\n birthday = str(line.split(',')[4]).strip()\n student_id = str(line.split(',')[5]).strip()\n if 
int(curr_grade) < 0:\n continue\n if \"'\" in curr_username:\n curr_username = curr_username.replace(\"'\", \"\")\n if \" \" in curr_username:\n curr_username = curr_username.replace(\" \", \"\")\n\n if curr_username == '':\n needs_username_list.append([first_name, last_name, curr_grade, birthday, student_id])\n elif not pattern.fullmatch(curr_username):\n wrong_web_id[curr_username] = \\\n [split_name(first_name), split_name(last_name), curr_grade, birthday, student_id]\n else:\n ps_user_list[curr_username] = [first_name, last_name, curr_grade, birthday, student_id]\n f.close()\n\n logging.info('\\nStudent list successfully obtained via SFTP.\\n')\n\n logging.info('Students who need PowerSchool usernames: ')\n for student in needs_username_list:\n logging.info(student[0] + ' ' + student[1] + ', Grade ' + str(student[2]))\n logging.info('\\n')\n\n logging.info(\"Students with incorrect web ID's: \")\n for student in wrong_web_id:\n logging.info(student)\n logging.info('\\n')\n if len(wrong_web_id) > 0:\n new_web_id = []\n for student in wrong_web_id.keys():\n graduation_year = datetime.date.today().year + (12 - int(wrong_web_id[student][2]))\n new_web_id.append(resolve_username(student, sorted(ps_user_list), wrong_web_id[student][0],\n wrong_web_id[student][1], graduation_year, 'student'))\n logging.info(\"Recommended new web ID's: \")\n for student in new_web_id:\n logging.info(student)\n logging.info('\\n')\n return ps_user_list, needs_username_list\n\n\ndef compare_to_ldap(powerschool_users, needs_ps_username=0):\n server = Server(host='10.110.204.21', port=636, use_ssl=True, get_info=NONE)\n logging.info('Please enter your LDAP username: ')\n login_name = str(input())\n password = getpass.getpass()\n conn = Connection(server, user='cn=' + login_name + ',ou=NoEmail,o=Snakeriver', password=password)\n conn.bind()\n while conn.result['description'] == 'invalidCredentials':\n logging.info('Incorrect username or password. 
Please try again.')\n logging.info('Please enter your LDAP username: ')\n login_name = str(input())\n password = getpass.getpass()\n conn = Connection(server, user='CN=' + login_name + ',ou=NoEmail,o=Snakeriver', password=password)\n conn.bind()\n\n ldap_un_list = []\n\n logging.info('\\n')\n search_filter = '(objectclass=Person)'\n for i in range(0, 13):\n curr_grade = 'Grade-' + str(i).zfill(2)\n search_base = 'ou=' + curr_grade + ',o=Snakeriver'\n logging.info('Searching ' + curr_grade)\n conn.search(search_base=search_base,\n search_filter=search_filter,\n search_scope=SUBTREE,\n attributes=['uid'])\n\n for entry in conn.entries:\n uid = entry['uid'].value\n ldap_un_list.append(uid)\n\n ldap_un_list.sort()\n\n exclusion_list = ['1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th', '11th', '12th', 'billybob']\n for name in exclusion_list:\n if name in ldap_un_list:\n ldap_un_list.remove(name)\n \n if needs_ps_username == 1:\n return ldap_un_list\n\n logging.info('\\n' + str(len(ldap_un_list)) + ' total students in LDAP, Grades 0-12.')\n with open('ldap_un_list.log', mode='w') as file:\n for student in ldap_un_list:\n file.write(student + '\\n')\n needs_deletion = []\n for student in ldap_un_list:\n if student in powerschool_users.keys():\n continue\n needs_deletion.append(student)\n\n logging.info('\\nStudents who need to be deleted from LDAP:')\n logging.info(needs_deletion)\n logging.info('\\n' + str(len(needs_deletion)) + ' accounts to be deleted.')\n\n needs_account = OrderedDict()\n for student in powerschool_users.keys():\n conn.search(search_base='o=snakeriver',\n search_filter='(uid=' + student + ')',\n search_scope=SUBTREE)\n if len(conn.entries) > 0:\n continue\n needs_account[student] = powerschool_users[student]\n\n logging.info('\\nStudents who need to be added to LDAP:')\n logging.info(needs_account.keys())\n logging.info('\\n' + str(len(needs_account)) + ' accounts to be created in LDAP.')\n\n if len(needs_deletion) == 0:\n logging.info('No accounts need to be deleted.')\n else:\n error_count = 0\n # User exists in LDAP but not PowerSchool -> we can delete them from LDAP\n for username in needs_deletion:\n conn.search(search_base='o=snakeriver',\n search_filter='(uid=' + username + ')')\n user = conn.entries[0].entry_dn\n conn.delete(user)\n if str(conn.result['description']) == 'success':\n logging.info('Success - ' + username + ' deleted.')\n else:\n logging.info('Error - ' + username + ' could not be deleted.')\n error_count += 1\n logging.info('\\n')\n logging.info('\\nAccount deletion process completed with ' + str(error_count) + ' errors.')\n\n pass_list = create_ldap_accounts(needs_account)\n update_students_in_ps(needs_account, pass_list)\n conn.unbind()\n\n\ndef create_ldap_accounts(user_list):\n info, pass_list = convert_information(user_list)\n grade_level_list = deque([])\n for value in user_list.values():\n grade_level_list.append(value[2])\n\n make_info_files(info, grade_level_list)\n make_dynamic_ctl_files(grade_level_list)\n import_using_jrb()\n\n return pass_list\n\n\ndef convert_information(user_list):\n information_list = deque([])\n pass_list = dict()\n for k in user_list.keys():\n word_list = make_word_file()\n\n secure_random = SystemRandom()\n first_word = secure_random.choice(word_list).strip()\n second_word = secure_random.choice(word_list).strip()\n\n # check if the password is either too long or contains profanity\n pf = ProfanityFilter()\n while len(first_word + second_word) > 10 or len(first_word + second_word) < 6 or \\\n 
\"'\" in first_word or \"'\" in second_word or pf.is_profane(first_word) or pf.is_profane(second_word):\n first_word = secure_random.choice(word_list).strip()\n second_word = secure_random.choice(word_list).strip()\n\n pwd = (first_word + second_word + str(randint(0, 9)) + str(randint(0, 9)))\n pass_list[k] = pwd\n\n first_name = user_list[k][0].title()\n last_name = user_list[k][1].title()\n full_name = first_name + ' ' + last_name\n email = k + \"@snakeriver.org\"\n information = pwd + ',' + k + ',' + full_name + ',' + last_name + ',' + first_name + ',' + email\n\n information_list.append(information)\n\n return information_list, pass_list\n\n\n# binary search over a sorted username list; returns the index of item, or None if absent\ndef search(lst, item):\n i = bisect_left(lst, item)\n if i != len(lst) and lst[i] == item:\n return i\n return None\n\n\n# generates a new username if one is taken\ndef resolve_username(curr_username, username_list, first_name, last_name, graduation_year, category):\n last_name_partition = last_name\n num_attempts = 0\n\n if category == 'student':\n while search(username_list, curr_username) is not None:\n logging.info('\\nUsername ' + curr_username + ' exists.')\n\n # extreme edge case - all possible usernames are taken\n if num_attempts > 15:\n logging.info('Username could not be resolved.')\n sys.exit(1)\n\n if len(curr_username) < 8:\n curr_username = str(graduation_year)[2:] + last_name + first_name[:8 - len(curr_username) + 1]\n else:\n last_name_partition = last_name_partition[:-1]\n first_name_partition = first_name[:6 - len(last_name_partition)]\n curr_username = str(graduation_year)[2:] + last_name_partition + first_name_partition\n\n num_attempts += 1\n else:\n while search(username_list, curr_username) is not None:\n logging.info('\\nUsername ' + curr_username + ' exists.')\n\n # extreme edge case - all possible usernames are taken\n if num_attempts > 15:\n logging.info('Username could not be resolved.')\n sys.exit(1)\n\n if len(curr_username) < 8:\n curr_username = last_name[:4] + first_name[:4]\n else:\n last_name_partition = last_name_partition[:-1]\n first_name_partition = first_name[:8 - len(last_name_partition)]\n curr_username = last_name_partition + first_name_partition\n\n num_attempts += 1\n\n logging.info('Username modified to: ' + curr_username)\n return curr_username\n\n\n# Rather than pulling the word list from the website every time,\n# it would be much faster to just read the list from a local txt file.\n# make_word_file opens the word list file if it exists, and makes the\n# file if it doesn't exist.\n# returns the list of possible words\ndef make_word_file():\n word_list_path = os.path.join(sys.path[0], 'snake_river_word_list.txt')\n\n try:\n f = open(word_list_path)\n word_list = f.readlines()\n f.close()\n except FileNotFoundError:\n logging.info(\"File not found. 
Generating word list file...\")\n word_site = \"http://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain\"\n response = requests.get(word_site)\n word_list = response.content.splitlines()\n\n for i in range(0, len(word_list)):\n word_list[i] = word_list[i].decode('utf-8')\n\n with open(word_list_path, mode='w+', encoding='utf-8') as myfile:\n myfile.write('\\n'.join(word_list))\n\n return word_list\n\n\n# Return false if the potential username is in ldap already, return true otherwise\n# cn = username, ou = Grade##, o = snakeriver\ndef check_name_in_ldap(candidate):\n server = Server(host='virgil-2.snakeriver.org', port=636, use_ssl=True, get_info=NONE)\n conn = Connection(server, read_only=True)\n conn.bind()\n\n conn.search(search_base='o=snakeriver', search_filter='(uid=' + candidate + ')')\n if len(conn.entries) > 0:\n logging.info('Username exists in ldap: ')\n logging.info(conn.entries[0])\n return False\n logging.info('Username not found in ldap.')\n return True\n\n\n# Creates comma delimited .dat files to import to JRB\ndef make_info_files(information, grade_level_list):\n file_path = 'c:\\\\jrb\\\\account_info'\n if not os.path.exists('c:\\\\jrb'):\n os.makedirs('c:\\\\jrb')\n\n # Delete all of the previous account info files, so that we only have accounts that need to be added\n for filename in glob.glob(file_path + '*'):\n try:\n os.remove(filename)\n except OSError:\n logging.info('File ' + filename + ' is currently in use. Close the file and try again.')\n\n for grade in grade_level_list:\n new_path = file_path + '_Grade' + str(grade).zfill(2) + '.dat'\n if os.path.exists(new_path):\n with open(new_path, mode='a') as f:\n f.write(information[0] + '\\n')\n else:\n with open(new_path, mode='w') as f:\n f.write(information[0] + '\\n')\n\n information.popleft()\n\n\n# Dynamically creates ctl files to know which Context (Grade level) to add the students to\ndef make_dynamic_ctl_files(grade_level_list):\n dynamic_file_path = 'c:\\\\jrb\\\\dynamic_ctl_file.ctl'\n\n template = ['\\t\\tSEPARATOR=,', '\\t\\tUSER TEMPLATE=Y', '\\t\\tUse two passes=Y', 'FIELDS', '\\tPassword',\n '\\tName', '\\tFull Name', '\\tLast Name', '\\tGiven Name', '\\tInternet Email Address']\n\n # Delete all of the dynamic ctl files, so that we only have templates for the grades we need to upload\n for filename in glob.glob('c:\\\\jrb\\\\dynamic_ctl_file*'):\n try:\n os.remove(filename)\n except OSError:\n logging.info('File ' + filename + ' is currently in use. 
Close the file and try again.')\n\n while len(grade_level_list) != 0:\n with open(dynamic_file_path, mode='w') as dynamic_file:\n dynamic_file.write('IMPORT CONTROL\\n')\n dynamic_file.write('\\t\\tNAME CONTEXT=\"' + '.Grade-' + str(grade_level_list[0]).zfill(2) + '.Snakeriver\"\\n')\n dynamic_file.write('\\n'.join(template))\n\n if not os.path.exists('c:\\\\jrb\\\\dynamic_ctl_file' + '_Grade' + str(grade_level_list[0]).zfill(2) + '.ctl'):\n os.rename(dynamic_file_path, 'c:\\\\jrb\\\\dynamic_ctl_file' +\n '_Grade' + str(grade_level_list[0]).zfill(2) + '.ctl')\n\n grade_level_list.popleft()\n\n\n# Create user accounts through JRButils using the created info and ctl files\ndef import_using_jrb():\n directory = 'c:\\\\jrb'\n\n info_file_list = []\n ctl_file_list = []\n\n for filename in glob.glob(directory + '\\\\account_info_Grade*'):\n insort_left(info_file_list, filename)\n for filename in glob.glob(directory + '\\\\dynamic_ctl_file_Grade*'):\n insort_left(ctl_file_list, filename)\n\n for i in range(0, len(info_file_list)):\n curr_info_file = info_file_list[i]\n curr_ctl_file = ctl_file_list[i]\n call(['c:\\\\jrb\\\\Part_4\\\\jrbimprt.exe', curr_ctl_file, curr_info_file,\n '/$', '/e', '/v', '/x=10'])\n\n\n# Take the newly added first graders and update PowerSchool fields\ndef update_students_in_ps(user_list, pass_list):\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n # *** ENTER FTP USERNAME AND PASSWORD HERE\n srv = pysftp.Connection(host='10.110.204.14', username='_____', password='_____', port=22, cnopts=cnopts)\n\n directory = os.getcwd()\n filename = os.path.join(directory, 'new_stds.txt')\n\n if len(user_list) == 0:\n logging.info('No students need to be updated.')\n return\n\n with open(filename, mode='a+') as new_stds:\n # new_stds.write('student_number\\tWeb_ID\\tWeb_Password\\tAllowWebAccess\\t' +\n # 'Student_Web_ID\\tStudent_Web_Password\\tStudent_AllowWebAccess\\tLunch_ID\\n')\n for k in user_list.keys():\n password = pass_list[k]\n student_number = user_list[k][4]\n new_stds.write(student_number)\n new_stds.write('\\t')\n new_stds.write(str(excel_date(user_list[k][3])) + user_list[k][0][:3].lower())\n new_stds.write('\\t')\n new_stds.write(student_number)\n new_stds.write('\\t')\n new_stds.write('1')\n new_stds.write('\\t')\n new_stds.write(k)\n new_stds.write('\\t')\n new_stds.write(password)\n new_stds.write('\\t')\n new_stds.write('1')\n new_stds.write('\\t')\n new_stds.write(student_number)\n new_stds.write('\\n')\n\n new_stds.close()\n\n srv.put(filename, '/Steve/new_stds.txt', preserve_mtime=True)\n\n srv.close()\n\n\n# Generates a parent's username by converting student's birthday to Excel ordinal\ndef excel_date(date):\n temp = datetime.datetime(1899, 12, 31)\n delta = datetime.datetime.strptime(date, '%m/%d/%Y') - temp\n return int(delta.days) + int(int(delta.seconds) / 86400) + 1\n\n\n# templates\n# user[first_name, last_name, curr_grade, birthday, student_id]\n# resolve_username(curr_username, username_list, first_name, last_name, graduation_year, category):\ndef handle_new_ps_users(needs_username_list):\n new_username_list = dict()\n empty_dict = dict()\n\n ldap_un_list = compare_to_ldap(empty_dict, needs_ps_username=1)\n for user in needs_username_list:\n graduation_year = (datetime.date.today().year + (12 - int(user[2])) + 1)\n first_name_split = split_name(user[0])\n last_name_split = split_name(user[1])\n\n if len(last_name_split) >= 5:\n candidate = str(graduation_year)[2:] + last_name_split[:5] + first_name_split[0]\n while (candidate in ldap_un_list) or 
(candidate in new_username_list.keys()):\n candidate = \\\n resolve_username(candidate, sorted(ldap_un_list + list(new_username_list.keys())), first_name_split,\n last_name_split[:5], graduation_year, 'student')\n else:\n candidate = str(graduation_year)[2:] + last_name_split + first_name_split[0]\n while (candidate in ldap_un_list) or (candidate in new_username_list.keys()):\n candidate = \\\n resolve_username(candidate, sorted(ldap_un_list + list(new_username_list.keys())), first_name_split,\n last_name_split, graduation_year, 'student')\n\n new_username_list[candidate] = [user[0], user[1], user[2], user[3], user[4]]\n\n pass_list = create_ldap_accounts(new_username_list)\n update_students_in_ps(new_username_list, pass_list)\n \n \n# main function\ndef create_user():\n while True:\n logging.info('\\n')\n logging.info('1) Run the manual student creation utility ' +\n '(creates student in LDAP. Will have to be manually added to PowerSchool)')\n logging.info('2) Run the automated student creation/deletion utility (also updates student info in PowerSchool)')\n logging.info('3) Quit')\n menu_prompt = int(input().strip())\n\n if menu_prompt == 1:\n # print(\"Are you creating a student or staff account?: \")\n # user_prompt = input().lower().strip()\n # delete the line below once staff is implemented\n user_prompt = \"student\"\n\n if not (user_prompt == \"student\") and not (user_prompt == \"staff\"):\n logging.info(\"You must enter either student or staff.\")\n continue\n else:\n if user_prompt == \"student\":\n student_information_list = deque([])\n grade_level_list = deque([])\n information, grade_levels = create_student(student_information_list, grade_level_list)\n # else:\n # create_staff()\n\n logging.info(\"\\nWould you like to create another account?(y/n): \")\n user_prompt = input().lower().strip()\n while not ((user_prompt == 'y') or (user_prompt == 'yes') or\n (user_prompt == 'n') or (user_prompt == 'no')):\n logging.info(\"You must enter y, yes, n, or no.\")\n logging.info(\"Would you like to create another account?(y/n): \")\n user_prompt = input().lower().strip()\n if (user_prompt == 'y') or (user_prompt == 'yes'):\n continue\n elif (user_prompt == 'n') or (user_prompt == 'no'):\n # Finally add the students that we created through this program to LDAP\n make_info_files(information, grade_levels)\n make_dynamic_ctl_files(grade_levels)\n import_using_jrb()\n elif menu_prompt == 2:\n ps_user_list, needs_username_list = usernames_from_sftp()\n compare_to_ldap(ps_user_list)\n \n if len(needs_username_list) > 0:\n handle_new_ps_users(needs_username_list)\n elif menu_prompt == 3:\n sys.exit(0)\n else:\n logging.info('Only 1, 2, or 3 may be entered.')\n continue\n\n\nif __name__ == '__main__':\n log_formatter = logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s')\n\n logFile = 'logfile.txt'\n\n h1 = RotatingFileHandler(filename=logFile, mode='a', maxBytes=20 * 1024 * 1024, backupCount=2)\n h1.setFormatter(log_formatter)\n h1.setLevel(logging.DEBUG)\n\n h2 = logging.StreamHandler(sys.stdout)\n h2.setLevel(logging.INFO)\n h2.setFormatter(logging.Formatter('%(message)s'))\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n\n root.addHandler(h1)\n root.addHandler(h2)\n\n create_user()\n\n for handler in root.handlers[:]:\n handler.close()\n root.removeHandler(handler)\n\n# Currently not in use\n#\n# def generate_staff_username_list():\n#\n# def create_staff():\n# while True:\n# print(\"Enter the staff's First name: \")\n# first_name = input().strip()\n# while check_name(first_name) is 
False:\n# print(\"A name must be alphabetical, or contain hyphens,spaces, or apostrophes.\")\n# print(\"Enter the staff's First name: \")\n# first_name = input().strip()\n#\n# print(\"Enter the staff's Last name: \")\n# last_name = input().strip()\n# while check_name(last_name) is False:\n# print(\"A name must be alphabetical, or contain hyphens, spaces, or apostrophes.\")\n# print(\"Enter the staff's Last name: \")\n# last_name = input().strip()\n#\n# while last_name == '':\n# print(\"A name cannot be empty.\")\n# print(\"Enter the staff's First name: \")\n# last_name = input()\n#\n# candidate = last_name[:4] + first_name[:4]\n# print('\\n' + candidate)\n#\n# username_list = generate_student_username_list()\n#\n# candidate = resolve_username(candidate, username_list, first_name, last_name, 0, 'staff')\n#\n# word_list = make_word_file()\n#\n# secure_random = SystemRandom()\n# first_word = secure_random.choice(word_list)\n# second_word = secure_random.choice(word_list)\n#\n# # check to see if the words are profane\n# pf = ProfanityFilter()\n#\n# while pf.is_profane(first_word) is True:\n# first_word = secure_random.choice(word_list)\n# while pf.is_profane(second_word) is True:\n# second_word = secure_random.choice(word_list)\n#\n# pwd = (first_word.strip() + second_word.strip() +\n# str(randint(0, 9)) + str(randint(0, 9)))\n# print(pwd)\n#\n# full_name = first_name.title() + ' ' + last_name.title()\n# email = candidate + \"@snakeriver.org\"\n# print(\"\\nInformation:\")\n# print(pwd + ',' + candidate + ',' + full_name + ','\n# + last_name.title() + ',' + first_name.title() + ',' + email)\n#\n# print(\"\\nDo you want to keep creating staff accounts?(y/n): \")\n# user_prompt = input().lower()\n# while not ((user_prompt == 'y') or (user_prompt == 'yes') or\n# (user_prompt == 'n') or (user_prompt == 'no')):\n# print(\"You must enter y, yes, n, or no.\")\n# print(\"Do you want to keep creating staff accounts?(y/n): \")\n# user_prompt = input().lower()\n# if (user_prompt == 'y') or (user_prompt == 'yes'):\n# continue\n# elif (user_prompt == 'n') or (user_prompt == 'no'):\n# return\n# Grabs all student usernames from PowerSchool\n# def generate_student_username_list():\n# student_list_path = os.path.join(sys.path[0], 'student.export.TEXT')\n# student_username_list = os.path.join(sys.path[0], 'student_usernames.txt')\n#\n# # try:\n# # f = open(student_username_list)\n# # l1 = f.readlines()\n# # f.close()\n# # except FileNotFoundError:\n# # print('\\nUsername file not found.')\n#\n# print('\\nEnter your PowerSchool username: ')\n# ps_username = input()\n#\n# while ps_username == '':\n# print('Usernames cannot be empty.')\n# print('\\nEnter your PowerSchool username: ')\n# ps_username = input()\n#\n# ps_password = getpass.getpass('PowerSchool password: ')\n# url = 'https://ps.snakeriver.org/admin/pw.html'\n#\n# # setup the browser profile so that the file downloads to the right place without asking\n# profile = webdriver.FirefoxProfile()\n# profile.set_preference('browser.download.folderList', 2)\n# profile.set_preference('browser.download.manager.showWhenStarting', False)\n# profile.set_preference('browser.download.dir', sys.path[0])\n# profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/ps-export')\n# profile.set_preference('browser.helperApps.alwaysAsk.force', False)\n# profile.set_preference('browser.helperApps.neverAsk.openFile', 'text/ps-export')\n# profile.set_preference('browser.download.manager.useWindow', False)\n# 
profile.set_preference('browser.download.manager.focusWhenStarting', False)\n# profile.set_preference('browser.download.manager.alertOnEXEOpen', False)\n# profile.set_preference('browser.download.manager.showAlertOnComplete', False)\n# profile.set_preference('browser.download.manager.closeWhenDone', True)\n#\n# driver = webdriver.Firefox(firefox_profile=profile,\n# executable_path='C:\\\\Users\\\\pagejord\\\\PycharmProjects\\\\geckodriver.exe')\n# driver.get(url)\n#\n# username = driver.find_element_by_name('username')\n# password = driver.find_element_by_name('password')\n# username.send_keys(ps_username)\n# password.send_keys(ps_password)\n#\n# driver.find_element_by_name('LoginForm').submit()\n# delay = 10\n#\n# # This is where everything gets ugly\n# # wait for the login to complete and the search button can be clicked\n# try:\n# WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"searchButton\"]')))\n# except TimeoutException:\n# print('Timed out waiting for search button to be clickable.')\n# print('Current timeout is: ' + str(delay) + ' seconds.')\n# sys.exit(1)\n#\n# sleep(2)\n# # click the search button to bring up the list of all students\n# driver.find_element_by_xpath('//*[@id=\"searchButton\"]').click()\n# sleep(2)\n#\n# # wait for the dropdown menu to load before attempting to choose the Export option\n# try:\n# driver.find_element_by_xpath('//*[@id=\"selectFunctionDropdownButton\"]').click()\n# WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.XPATH,\n# '// *[ @ id = \"lnk_ExportUsingTemplate\"]')))\n# except TimeoutException:\n# print('Timed out waiting for dropdown menu to load.')\n# print('Current timeout is: ' + str(delay) + ' seconds.')\n# sys.exit(1)\n#\n# driver.find_element_by_xpath('// *[ @ id = \"lnk_ExportUsingTemplate\"]').click()\n#\n# # Selects 'Students' in Export Using Template dropdown menu\n# sleep(2)\n# driver.find_element_by_xpath('/html/body/form/div[1]/div[3]/div[2]/div[3]/table/tbody/tr[2]/td[2]/select/ \\\n# option[2]').click()\n#\n# # wait for the new page to load, wait for \"The selected students\" option to load\n# try:\n# WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.XPATH, '/html/body/form/div[1]/div[3]/ \\\n# div[2]/div[3]/table/tbody/tr[4]/ \\\n# td[2]/p/input[2]')))\n# except TimeoutException:\n# print('Timed out waiting for template page to update.')\n# print('Current timeout is: ' + str(delay) + ' seconds.')\n# sys.exit(1)\n#\n# # selects the \"For Creating Student User Accs\" template and the \"All selected students\" option\n# driver.find_element_by_xpath('/html/body/form/div[1]/div[3]/div[2]/div[3]/table/tbody/tr[3]/td[2]/select/ \\\n# option[15]').click()\n#\n# driver.find_element_by_xpath('/html/body/form/div[1]/div[3]/div[2]/div[3]/table/tbody/tr[4]/td[2]/p/input[2]') \\\n# .click()\n#\n# # clicks the submit button to download the list to where the Python script runs\n# driver.find_element_by_xpath('//*[@id=\"btnSubmit\"]').click()\n#\n# # make sure the export file is actually downloaded before attempting to extract anything from it\n# while True:\n# if os.path.isfile('student.export.text.part'):\n# sleep(1)\n# continue\n# break\n#\n# driver.close()\n#\n# # finally, we can extract the list of student usernames\n# f = open(student_list_path)\n# l1 = []\n# with open(student_username_list, mode='a+') as username_file:\n# next(f)\n# for line in f.readlines():\n# if line.split(',')[9] == '':\n# continue\n# l1.append(line.split(',')[9])\n# l1.sort()\n# 
username_file.write('\\n'.join(l1))\n# f.close()\n#\n# return l1\n", "sub_path": "create_user.py", "file_name": "create_user.py", "file_ext": "py", "file_size_in_byte": 35269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.info", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 54, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 62, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 71, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 91, "usage_type": "call"}, {"api_name": "random.SystemRandom", "line_number": 95, "usage_type": "call"}, {"api_name": "profanityfilter.ProfanityFilter", "line_number": 100, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 106, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 115, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 119, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 120, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pysftp.CnOpts", "line_number": 152, "usage_type": "call"}, {"api_name": "pysftp.Connection", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 159, "usage_type": "name"}, {"api_name": "os.path.getcwd", "line_number": 159, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 163, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 189, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 191, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 193, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 194, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 196, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 198, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 203, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 203, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 206, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 208, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 209, "usage_type": "call"}, {"api_name": "ldap3.Server", "line_number": 214, "usage_type": "call"}, {"api_name": "ldap3.NONE", "line_number": 214, "usage_type": "name"}, {"api_name": "logging.info", 
"line_number": 215, "usage_type": "call"}, {"api_name": "getpass.getpass", "line_number": 217, "usage_type": "call"}, {"api_name": "ldap3.Connection", "line_number": 218, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 221, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 222, "usage_type": "call"}, {"api_name": "getpass.getpass", "line_number": 224, "usage_type": "call"}, {"api_name": "ldap3.Connection", "line_number": 225, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 230, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 235, "usage_type": "call"}, {"api_name": "ldap3.SUBTREE", "line_number": 238, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 255, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 265, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 266, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 267, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 269, "usage_type": "call"}, {"api_name": "ldap3.SUBTREE", "line_number": 273, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 278, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 279, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 280, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 283, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 293, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 295, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 297, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 298, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 307, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 319, "usage_type": "call"}, {"api_name": "random.SystemRandom", "line_number": 324, "usage_type": "call"}, {"api_name": "profanityfilter.ProfanityFilter", "line_number": 329, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 335, "usage_type": "call"}, {"api_name": "bisect.bisect_left", "line_number": 351, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 364, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 368, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 369, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 381, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 385, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 386, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 397, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 407, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 407, "usage_type": "name"}, {"api_name": "sys.path", "line_number": 407, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 414, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 416, "usage_type": "call"}, {"api_name": "ldap3.Server", "line_number": 431, "usage_type": "call"}, {"api_name": "ldap3.NONE", "line_number": 431, "usage_type": "name"}, {"api_name": "ldap3.Connection", "line_number": 432, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 437, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 438, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 440, 
"usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 447, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 447, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 447, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 448, "usage_type": "call"}, {"api_name": "os.path", "line_number": 448, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path.remove", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path", "line_number": 453, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 455, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 459, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 459, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 459, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 477, "usage_type": "call"}, {"api_name": "os.path.remove", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path", "line_number": 479, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 481, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 489, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 489, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 489, "usage_type": "name"}, {"api_name": "os.path.rename", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path", "line_number": 490, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 503, "usage_type": "call"}, {"api_name": "bisect.insort_left", "line_number": 504, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 505, "usage_type": "call"}, {"api_name": "bisect.insort_left", "line_number": 506, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 511, "usage_type": "call"}, {"api_name": "pysftp.CnOpts", "line_number": 517, "usage_type": "call"}, {"api_name": "pysftp.Connection", "line_number": 520, "usage_type": "call"}, {"api_name": "os.path.getcwd", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path", "line_number": 522, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 523, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 523, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 526, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 561, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 562, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 562, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 575, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 575, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 601, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 602, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 604, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 605, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 615, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 619, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 620, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 625, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 629, "usage_type": "call"}, 
{"api_name": "logging.info", "line_number": 630, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 646, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 648, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 653, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 657, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 659, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 661, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 661, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 662, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 663, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 665, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 666, "usage_type": "attribute"}]} +{"seq_id": "98354011", "text": "from utils.mask_functions import better_mask2rle, rle2mask\n\n\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\n# experiments/smp_unet_sampler_1024accum_bug/best.h5 0.9 3800 do_tta False cut_borders False[change in conf]\nour_best = pd.read_csv(\"./submissions/submission_smp_unet_sampler_1024accum_bug.csv\", dtype=str)\n\n# smp_unet_sampler_alignaug_1024 5.0000000e-01 4.5000000e+03 do_tta True cut_borders from config\nbest_align = pd.read_csv(\"./submissions/submission_align_aug_tta_best.csv\", dtype=str)\n# smp_unet_sampler_alignaug_1024_div 5.5000001e-01 3.3000000e+03 do_tta True cut_borders from config\nbest_align_div = pd.read_csv(\"./submissions/submission_align_aug_div_tta_best.csv\", dtype=str)\n# experiments/smp_unet_sampler_1024accum_bug/best.h5 5.0000000e-01 3.6000000e+03 do_tta True cut_borders True[change in conf]\naccu_bug_best = pd.read_csv(\"./submissions/submission_accum_bug_tta_best.csv\", dtype=str)\n\nbest_new_dfs = [accu_bug_best, best_align, best_align_div]\n\n\ndfs = [our_best] + best_new_dfs\nout_values = []\nfor img_id in tqdm(our_best.ImageId.unique()):\n or_between = None\n skip = False\n for i, df in enumerate(dfs):\n val = df[df.ImageId == img_id][\"EncodedPixels\"].values[0]\n if val.strip() == \"-1\":\n skip = True\n break\n mask = (rle2mask(val, 1024, 1024) > 0).astype(np.uint8)\n if or_between is None:\n or_between = np.zeros_like(mask, dtype=np.bool)\n or_between |= mask > 0.5\n if skip:\n out_values.append([img_id, \"-1\"])\n else:\n out_values.append([img_id, better_mask2rle(or_between * 1 * 255)])\n\nout_best_values = out_values.copy()\n\nout_values_df = pd.DataFrame(out_best_values, columns=['ImageId', 'EncodedPixels'])\nprint((out_values_df.EncodedPixels == \"-1\").sum())\nout_values_df.to_csv(\"merge_best.csv\", index=None)\n", "sub_path": "do_merge.py", "file_name": "do_merge.py", "file_ext": "py", "file_size_in_byte": 1896, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.mask_functions.rle2mask", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": 
"attribute"}, {"api_name": "numpy.zeros_like", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 36, "usage_type": "attribute"}, {"api_name": "utils.mask_functions.better_mask2rle", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "53958961", "text": "import numpy\nimport time\nfrom MDSplus import*\nimport os as path\nimport struct\nfrom pylab import*\nimport epics \nfrom epics import caget\nfrom epics import PV\n\n\t\n##################reading from file##########################################################\n\n\np = PV('xyz:waveDouble')\ndata = p.get()\nprint(data)\n#data=numpy.delete(data,[0:4],None)\n##################FORMING INDIVIDUAL ARRAY##########################################################\n\nsinewave = data[5::5]\ncosine = data[6::5]\nrandom = data[7::5]\nrandom_int64 = random.astype(numpy.int64)\nsawtooth = data[8::5]\ntimestamp = data[9::5]-20828448000000000 #Epoch\ntimestamp = timestamp-6311520000000000 #MDSPlus\ntimestamp = timestamp*100\n#timestamp_int64 = timestamp.astype(numpy.int64\n\n\n##################PRINTING INDIVIDUAL ARRAY##########################################################\n\nprint(len(sinewave))\nprint(len(cosine))\nprint(len(random))\nprint(len(sawtooth))\nprint(len(timestamp))\nprint(\"sinewave \\t \\t\", sinewave)\nprint(\"cosinewave \\t \\t\", cosine)\nprint(\"random \\t \\t\", random_int64)\nprint(\"sawtooth \\t \\t\", sawtooth)\nprint(\"timestamp \\t \\t\", timestamp)\n\n\n##################PRINT INDIVIDUAL ARRAY WITH X_AXIS AS TIMESTAMP##########################################################\n\n\n#plot(timestamp,sinewave)\n#plot(timestamp,cosine)\n#plot(timestamp,random)\n#plot(timestamp,sawtooth)\n#show()\n\n\n\n\n\n###################MDSPLUS SECTION############################################\n\n###################Reading shot.no############################################\nf = open('shot_no.bin','rb')\nshot_no = f.read()\nf.close()\nshot_no = float(shot_no)\nshot_no = int(shot_no)\n\n\n\n###################CHANNEL_1############################################\n\n###################TREE OPERATION############################################\nmyTree = Tree(\"finalpv \" , shot_no)\n\n\n###################GETTING NODE FOR COSINE############################################\n\nmyTree.setDefault(myTree.getNode('OFFLINE_DATA'))\nmyTree.setDefault(myTree.getNode('COSINE'))\nCOSINE = myTree.getNode('COSINE')\n\n###################STORING TIMESTAMP DATA###########################################\nfor i in range(0,len(cosine)):\n\t#storing data samples\n\tcosine_val = cosine[i]\n\ttimestamp_int64_val = timestamp[i]\n\tCOSINE.putRow(10000, Float64(cosine_val), float64(timestamp_int64_val))\n\n\n\n\n\n\n###################CHANNEL_2############################################\n\n###################TREE OPERATION############################################\nmyTree = Tree(\"finalpv \" , shot_no)\n\n\n###################GETTING NODE FOR RANDOM############################################\n\nmyTree.setDefault(myTree.getNode('OFFLINE_DATA'))\nmyTree.setDefault(myTree.getNode('RANDOM'))\nRANDOM = myTree.getNode('RANDOM')\n\n###################STORING TIMESTAMP DATA###########################################\nfor i in range(0,len(random)):\n\t#storing data samples\n\trandom_int64_val = random_int64[i]\n\ttimestamp_int64_val = timestamp[i]\n\tRANDOM.putRow(10000, Float64(random_int64_val), 
float64(timestamp_int64_val))\n\n\n\n\n###################CHANNEL_3############################################\n\n###################TREE OPERATION############################################\nmyTree = Tree(\"finalpv \" , shot_no)\n\n###################GETTING NODE FOR SINEWAVE############################################\n\nmyTree.setDefault(myTree.getNode('OFFLINE_DATA'))\nmyTree.setDefault(myTree.getNode('SINEWAVE'))\nSINEWAVE = myTree.getNode('SINEWAVE')\n\n###################STORING TIMESTAMP DATA###########################################\nfor i in range(0,len(sinewave)):\n\t#storing data samples\n\tSINEWAVE_val = sinewave[i]\n\ttimestamp_int64_val = timestamp[i]\n\tSINEWAVE.putRow(10000, Float64(SINEWAVE_val), float64(timestamp_int64_val))\n\n\n\n\n\n\n###################CHANNEL_4############################################\n\n###################TREE OPERATION############################################\nmyTree = Tree(\"finalpv \" , shot_no)\n\n###################GETTING NODE FOR SAWTOOTH############################################\n\nmyTree.setDefault(myTree.getNode('OFFLINE_DATA'))\nmyTree.setDefault(myTree.getNode('SAWTOOTH'))\nSAWTOOTH = myTree.getNode('SAWTOOTH')\n\n###################STORING TIMESTAMP DATA###########################################\nfor i in range(0,len(sawtooth)):\n\t#storing data samples\n\tsawtooth_val = sawtooth[i]\n\ttimestamp_int64_val = timestamp[i]\n\tSAWTOOTH.putRow(10000, Float64(sawtooth_val), float64(timestamp_int64_val))\n\n\n\n\n\n\n##################################rough######################################################\n\n#myTree.setDefault(myTree.getNode('RANDOM'))\n\n#myTree.setDefault(myTree.getNode('sinewave'))\n#n2 = myTree.getNode('RANDOM' )\n#SIG_1 = myTree.getNode('SIG_1')\n\n#RAW.putData(Float64Array(voltage))\n#TIME.putData(Float64Array(timestamp))\n\n\n\n", "sub_path": "codac/py/data_remote.py", "file_name": "data_remote.py", "file_ext": "py", "file_size_in_byte": 4754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "epics.PV", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "53339362", "text": "#!/usr/bin/env python3.7\n\nrecipes = {\n\t\"2H Weapon - Agility\": {\n \"ID\": 27837,\n \"Learn\": 290,\n \"Yellow\": 310,\n \"Green\": 330,\n \"Grey\": 350,\n \"Source\": \"Reputation\",\n \"RecipeID\": 22392,\n \"Reagents\": {\n \"Large Brilliant Shard\": 10,\n \"Greater Eternal Essence\": 6,\n\t\t\t\"Illusion Dust\": 14,\n\t\t\t\"Essence of Air\": 4\n }\n },\n \"2H Weapon - Greater Impact\": {\n \"ID\": 13937,\n \"Learn\": 240,\n \"Yellow\": 260,\n \"Green\": 280,\n \"Grey\": 300,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Large Radiant Shard\": 2,\n\t\t\t\"Dream Dust\": 2\n }\n },\n \"2H Weapon - Impact\": {\n \"ID\": 13695,\n \"Learn\": 200,\n \"Yellow\": 220,\n \"Green\": 240,\n \"Grey\": 260,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 4,\n\t\t\t\"Large Glowing Shard\": 1\n }\n },\n \"2H Weapon - Lesser Impact\": {\n \"ID\": 13529,\n \"Learn\": 145,\n \"Yellow\": 170,\n \"Green\": 190,\n \"Grey\": 210,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 3,\n\t\t\t\"Large Glimmering Shard\": 1\n }\n },\n \"2H Weapon - Lesser Intellect\": {\n \"ID\": 7793,\n \"Learn\": 100,\n \"Yellow\": 130,\n \"Green\": 150,\n \"Grey\": 170,\n \"Source\": \"VendorLimited\",\n \"RecipeID\": 6349,\n \"Reagents\": {\n\t\t\t\"Greater Magic 
Essence\": 3\n }\n },\n \"2H Weapon - Lesser Spirit\": {\n \"ID\": 13380,\n \"Learn\": 110,\n \"Yellow\": 135,\n \"Green\": 155,\n \"Grey\": 175,\n \"Source\": \"Drop\",\n \"RecipeID\": 11038,\n \"Reagents\": {\n\t\t\t\"Lesser Astral Essence\": 1,\n\t\t\t\"Strange Dust\": 6\n }\n },\n \"2H Weapon - Minor Impact\": {\n \"ID\": 7745,\n \"Learn\": 100,\n \"Yellow\": 130,\n \"Green\": 150,\n \"Grey\": 170,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 4,\n\t\t\t\"Small Glimmering Shard\": 1\n }\n },\n \"2H Weapon - Superior Impact\": {\n \"ID\": 20030,\n \"Learn\": 295,\n \"Yellow\": 315,\n \"Green\": 335,\n \"Grey\": 355,\n \"Source\": \"Drop\",\n \"RecipeID\": 16247,\n \"Reagents\": {\n\t\t\t\"Large Brilliant Shard\": 4,\n\t\t\t\"Illusion Dust\": 10\n }\n },\n \"Boots - Agility\": {\n \"ID\": 13935,\n \"Learn\": 235,\n \"Yellow\": 255,\n \"Green\": 275,\n \"Grey\": 295,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Nether Essence\": 2\n }\n },\n \"Boots - Greater Agility\": {\n \"ID\": 20023,\n \"Learn\": 295,\n \"Yellow\": 315,\n \"Green\": 335,\n \"Grey\": 355,\n \"Source\": \"Drop\",\n \"RecipeID\": 16245,\n \"Reagents\": {\n\t\t\t\"Greater Eternal Essence\": 8\n }\n },\n \"Boots - Greater Stamina\": {\n \"ID\": 20020,\n \"Learn\": 260,\n \"Yellow\": 280,\n \"Green\": 300,\n \"Grey\": 320,\n \"Source\": \"Drop\",\n \"RecipeID\": 16215,\n \"Reagents\": {\n\t\t\t\"Dream Dust\": 10\n }\n },\n \"Boots - Lesser Agility\": {\n \"ID\": 13637,\n \"Learn\": 160,\n \"Yellow\": 180,\n \"Green\": 200,\n \"Grey\": 220,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 1,\n\t\t\t\"Lesser Mystic Essence\": 1\n }\n },\n \"Boots - Lesser Spirit\": {\n \"ID\": 13687,\n \"Learn\": 190,\n \"Yellow\": 210,\n \"Green\": 230,\n \"Grey\": 250,\n \"Source\": \"Drop\",\n \"RecipeID\": 11167,\n \"Reagents\": {\n\t\t\t\"Greater Mystic Essence\": 1,\n\t\t\t\"Lesser Mystic Essence\": 2\n }\n },\n \"Boots - Lesser Stamina\": {\n \"ID\": 13644,\n \"Learn\": 170,\n \"Yellow\": 190,\n \"Green\": 210,\n \"Grey\": 230,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 4\n }\n },\n \"Boots - Minor Agility\": {\n \"ID\": 7867,\n \"Learn\": 125,\n \"Yellow\": 150,\n \"Green\": 170,\n \"Grey\": 190,\n \"Source\": \"Vendor\",\n \"RecipeID\": 6377,\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 6,\n\t\t\t\"Lesser Astral Essence\": 2\n }\n },\n \"Boots - Minor Speed\": {\n \"ID\": 13890,\n \"Learn\": 225,\n \"Yellow\": 245,\n \"Green\": 265,\n \"Grey\": 285,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Small Radiant Shard\": 1,\n\t\t\t\"Aquamarine\": 1,\n\t\t\t\"Lesser Nether Essence\": 1\n }\n },\n \"Boots - Minor Stamina\": {\n \"ID\": 7863,\n \"Learn\": 125,\n \"Yellow\": 150,\n \"Green\": 170,\n \"Grey\": 190,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 8\n }\n },\n \"Boots - Spirit\": {\n \"ID\": 20024,\n \"Learn\": 275,\n \"Yellow\": 295,\n \"Green\": 315,\n \"Grey\": 335,\n \"Source\": \"Drop\",\n \"RecipeID\": 16220,\n \"Reagents\": {\n\t\t\t\"Greater Eternal Essence\": 2,\n\t\t\t\"Lesser Eternal Essence\": 1\n }\n },\n \"Boots - Stamina\": {\n \"ID\": 13836,\n \"Learn\": 215,\n \"Yellow\": 235,\n \"Green\": 255,\n \"Grey\": 275,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 5\n }\n },\n \"Bracer - Deflection\": {\n \"ID\": 13931,\n \"Learn\": 235,\n \"Yellow\": 255,\n \"Green\": 275,\n \"Grey\": 295,\n \"Source\": \"Vendor\",\n \"RecipeID\": 11223,\n \"Reagents\": {\n\t\t\t\"Greater Nether 
Essence\": 1,\n\t\t\t\"Dream Dust\": 2\n }\n },\n \"Bracer - Greater Intellect\": {\n \"ID\": 20008,\n \"Learn\": 255,\n \"Yellow\": 275,\n \"Green\": 295,\n \"Grey\": 315,\n \"Source\": \"Drop\",\n \"RecipeID\": 16214,\n \"Reagents\": {\n\t\t\t\"Lesser Eternal Essence\": 3\n }\n },\n \"Bracer - Greater Spirit\": {\n \"ID\": 13846,\n \"Learn\": 220,\n \"Yellow\": 240,\n \"Green\": 260,\n \"Grey\": 280,\n \"Source\": \"Drop\",\n \"RecipeID\": 11204,\n \"Reagents\": {\n\t\t\t\"Lesser Nether Essence\": 3,\n\t\t\t\"Vision Dust\": 1\n }\n },\n \"Bracer - Greater Stamina\": {\n \"ID\": 13945,\n \"Learn\": 245,\n \"Yellow\": 165,\n \"Green\": 285,\n \"Grey\": 305,\n \"Source\": \"Drop\",\n \"RecipeID\": 11225,\n \"Reagents\": {\n\t\t\t\"Dream Dust\": 5\n }\n },\n \"Bracer - Greater Strength\": {\n \"ID\": 13939,\n \"Learn\": 240,\n \"Yellow\": 260,\n \"Green\": 280,\n \"Grey\": 300,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Dream Dust\": 2,\n\t\t\t\"Greater Nether Essence\": 1\n }\n },\n \"Bracer - Intellect\": {\n \"ID\": 13822,\n \"Learn\": 210,\n \"Yellow\": 230,\n \"Green\": 250,\n \"Grey\": 270,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Nether Essence\": 2\n }\n },\n \"Bracer - Lesser Deflection\": {\n \"ID\": 13646,\n \"Learn\": 170,\n \"Yellow\": 190,\n \"Green\": 210,\n \"Grey\": 230,\n \"Source\": \"Vendor\",\n \"RecipeID\": 11163,\n \"Reagents\": {\n\t\t\t\"Lesser Mystic Essence\": 1,\n\t\t\t\"Soul Dust\": 2\n }\n },\n \"Bracer - Lesser Intellect\": {\n \"ID\": 13622,\n \"Learn\": 150,\n \"Yellow\": 175,\n \"Green\": 195,\n \"Grey\": 215,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Astral Essence\": 2\n }\n },\n \"Bracer - Lesser Spirit\": {\n \"ID\": 7859,\n \"Learn\": 120,\n \"Yellow\": 145,\n \"Green\": 165,\n \"Grey\": 185,\n \"Source\": \"Drop\",\n \"RecipeID\": 6375,\n \"Reagents\": {\n\t\t\t\"Lesser Astral Essence\": 2\n }\n },\n \"Bracer - Lesser Stamina\": {\n \"ID\": 13501,\n \"Learn\": 130,\n \"Yellow\": 155,\n \"Green\": 175,\n \"Grey\": 195,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 2\n }\n },\n \"Bracer - Lesser Strength\": {\n \"ID\": 13536,\n \"Learn\": 140,\n \"Yellow\": 165,\n \"Green\": 185,\n \"Grey\": 205,\n \"Source\": \"Vendor\",\n \"RecipeID\": 11101,\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 2\n }\n },\n \"Bracer - Mana Regeneration\": {\n \"ID\": 23801,\n \"Learn\": 290,\n \"Yellow\": 310,\n \"Green\": 330,\n \"Grey\": 350,\n \"Source\": \"Reputation\",\n \"RecipeID\": 19446,\n \"Reagents\": {\n\t\t\t\"Illusion Dust\": 16,\n\t\t\t\"Greater Eternal Essence\": 4,\n\t\t\t\"Essence of Water\": 2\n }\n },\n \"Bracer - Minor Agility\": {\n \"ID\": 7779,\n \"Learn\": 80,\n \"Yellow\": 115,\n \"Green\": 135,\n \"Grey\": 155,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 2,\n\t\t\t\"Greater Magic Essence\": 1\n }\n },\n \"Bracer - Minor Deflect\": {\n \"ID\": 7428,\n \"Learn\": 1,\n \"Yellow\": 80,\n \"Green\": 100,\n \"Grey\": 120,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Magic Essence\": 1,\n\t\t\t\"Strange Dust\": 1\n }\n },\n \"Bracer - Minor Health\": {\n \"ID\": 7418,\n \"Learn\": 1,\n \"Yellow\": 70,\n \"Green\": 90,\n \"Grey\": 110,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 1\n }\n },\n \"Bracer - Minor Spirit\": {\n \"ID\": 7766,\n \"Learn\": 60,\n \"Yellow\": 105,\n \"Green\": 125,\n \"Grey\": 145,\n \"Source\": \"Drop\",\n \"RecipeID\": 6344,\n \"Reagents\": {\n\t\t\t\"Lesser Magic Essence\": 2\n }\n },\n \"Bracer - 
Minor Stamina\": {\n \"ID\": 7457,\n \"Learn\": 50,\n \"Yellow\": 100,\n \"Green\": 120,\n \"Grey\": 140,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 3\n }\n },\n \"Bracer - Minor Strength\": {\n \"ID\": 7782,\n \"Learn\": 80,\n \"Yellow\": 115,\n \"Green\": 135,\n \"Grey\": 155,\n \"Source\": \"Drop\",\n \"RecipeID\": 6347,\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 5\n }\n },\n \"Bracer - Spirit\": {\n \"ID\": 13642,\n \"Learn\": 165,\n \"Yellow\": 185,\n \"Green\": 205,\n \"Grey\": 225,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Mystic Essence\": 1\n }\n },\n \"Bracer - Stamina\": {\n \"ID\": 13648,\n \"Learn\": 170,\n \"Yellow\": 190,\n \"Green\": 210,\n \"Grey\": 230,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 6\n }\n },\n \"Bracer - Strength\": {\n \"ID\": 13661,\n \"Learn\": 180,\n \"Yellow\": 200,\n \"Green\": 220,\n \"Grey\": 240,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 1\n }\n },\n \"Bracer - Superior Spirit\": {\n \"ID\": 20009,\n \"Learn\": 270,\n \"Yellow\": 290,\n \"Green\": 310,\n \"Grey\": 330,\n \"Source\": \"Drop\",\n \"RecipeID\": 16218,\n \"Reagents\": {\n\t\t\t\"Lesser Eternal Essence\": 3,\n\t\t\t\"Dream Dust\": 10\n }\n },\n \"Bracer - Superior Strength\": {\n \"ID\": 20010,\n \"Learn\": 295,\n \"Yellow\": 315,\n \"Green\": 335,\n \"Grey\": 355,\n \"Source\": \"Drop\",\n \"RecipeID\": 16246,\n \"Reagents\": {\n\t\t\t\"Illusion Dust\": 6,\n\t\t\t\"Greater Eternal Essence\": 6\n }\n },\n \"Brilliant Mana Oil\": {\n \"ID\": 25130,\n \"Learn-PHASE5\": 300,\n \"Yellow\": 310,\n \"Green\": 320,\n \"Grey\": 330,\n \"Source\": \"Reputation\",\n \"RecipeID\": 20757,\n \"Reagents\": {\n\t\t\t\"Large Brilliant Shard\": 2,\n \"Purple Lotus\": 3,\n \"Imbued Vial\": 1\n }\n },\n \"Brilliant Wizard Oil\": {\n \"ID\": 25129,\n \"Learn-PHASE5\": 300,\n \"Yellow\": 310,\n \"Green\": 320,\n \"Grey\": 330,\n \"Source\": \"Reputation\",\n \"RecipeID\": 20756,\n \"Reagents\": {\n\t\t\t\"Large Brilliant Shard\": 2,\n \"Firebloom\": 3,\n \"Imbued Vial\": 1\n }\n },\n \"Chest - Greater Health\": {\n \"ID\": 13640,\n \"Learn\": 160,\n \"Yellow\": 180,\n \"Green\": 200,\n \"Grey\": 220,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 3\n }\n },\n \"Chest - Greater Mana\": {\n \"ID\": 13663,\n \"Learn\": 185,\n \"Yellow\": 205,\n \"Green\": 225,\n \"Grey\": 245,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Mystic Essence\": 1\n }\n },\n \"Chest - Health\": {\n \"ID\": 7857,\n \"Learn\": 120,\n \"Yellow\": 145,\n \"Green\": 165,\n \"Grey\": 185,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 4,\n\t\t\t\"Lesser Astral Essence\": 1\n }\n },\n \"Chest - Lesser Absorption\": {\n \"ID\": 13538,\n \"Learn\": 140,\n \"Yellow\": 165,\n \"Green\": 185,\n \"Grey\": 205,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 2,\n\t\t\t\"Greater Astral Essence\": 1,\n\t\t\t\"Large Glimmering Shard\": 1\n }\n },\n \"Chest - Lesser Health\": {\n \"ID\": 7748,\n \"Learn\": 60,\n \"Yellow\": 105,\n \"Green\": 125,\n \"Grey\": 145,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 2,\n\t\t\t\"Lesser Magic Essence\": 2\n }\n },\n \"Chest - Lesser Mana\": {\n \"ID\": 7776,\n \"Learn\": 80,\n \"Yellow\": 115,\n \"Green\": 135,\n \"Grey\": 155,\n \"Source\": \"Vendor\",\n \"RecipeID\": 6346,\n \"Reagents\": {\n\t\t\t\"Greater Magic Essence\": 1,\n\t\t\t\"Lesser Magic Essence\": 1\n }\n },\n \"Chest - Lesser Stats\": {\n \"ID\": 
13700,\n \"Learn\": 200,\n \"Yellow\": 220,\n \"Green\": 240,\n \"Grey\": 260,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Mystic Essence\": 2,\n\t\t\t\"Vision Dust\": 2,\n\t\t\t\"Large Glowing Shard\": 1\n }\n },\n \"Chest - Major Health\": {\n \"ID\": 20026,\n \"Learn\": 275,\n \"Yellow\": 295,\n \"Green\": 315,\n \"Grey\": 335,\n \"Source\": \"VendorLimited\",\n \"RecipeID\": 16221,\n \"Reagents\": {\n\t\t\t\"Illusion Dust\": 6,\n\t\t\t\"Small Brilliant Shard\": 1\n }\n },\n \"Chest - Major Mana\": {\n \"ID\": 20028,\n \"Learn\": 290,\n \"Yellow\": 310,\n \"Green\": 330,\n \"Grey\": 350,\n \"Source\": \"Drop\",\n \"RecipeID\": 16242,\n \"Reagents\": {\n\t\t\t\"Greater Eternal Essence\": 3,\n\t\t\t\"Small Brilliant Shard\": 1\n }\n },\n \"Chest - Mana\": {\n \"ID\": 13607,\n \"Learn\": 145,\n \"Yellow\": 170,\n \"Green\": 190,\n \"Grey\": 210,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Astral Essence\": 1,\n\t\t\t\"Lesser Astral Essence\": 2\n }\n },\n \"Chest - Minor Absorption\": {\n \"ID\": 7426,\n \"Learn\": 40,\n \"Yellow\": 90,\n \"Green\": 110,\n \"Grey\": 130,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 2,\n\t\t\t\"Lesser Magic Essence\": 1\n }\n },\n \"Chest - Minor Health\": {\n \"ID\": 7420,\n \"Learn\": 15,\n \"Yellow\": 70,\n \"Green\": 90,\n \"Grey\": 110,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 1\n }\n },\n \"Chest - Minor Mana\": {\n \"ID\": 7443,\n \"Learn\": 20,\n \"Yellow\": 80,\n \"Green\": 100,\n \"Grey\": 120,\n \"Source\": \"Vendor\",\n \"RecipeID\": 6342,\n \"Reagents\": {\n\t\t\t\"Lesser Magic Essence\": 1\n }\n },\n \"Chest - Minor Stats\": {\n \"ID\": 13626,\n \"Learn\": 150,\n \"Yellow\": 175,\n \"Green\": 195,\n \"Grey\": 215,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Astral Essence\": 1,\n\t\t\t\"Soul Dust\": 1,\n\t\t\t\"Large Glimmering Shard\": 1\n }\n },\n \"Chest - Stats\": {\n \"ID\": 13941,\n \"Learn\": 245,\n \"Yellow\": 265,\n \"Green\": 285,\n \"Grey\": 305,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Large Radiant Shard\": 1,\n\t\t\t\"Dream Dust\": 3,\n\t\t\t\"Greater Nether Essence\": 2\n }\n },\n \"Chest - Superior Health\": {\n \"ID\": 13858,\n \"Learn\": 220,\n \"Yellow\": 240,\n \"Green\": 260,\n \"Grey\": 280,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 6\n }\n },\n \"Chest - Superior Mana\": {\n \"ID\": 13917,\n \"Learn\": 230,\n \"Yellow\": 250,\n \"Green\": 270,\n \"Grey\": 290,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Nether Essence\": 1,\n\t\t\t\"Lesser Nether Essence\": 2\n }\n },\n \"Cloak - Defense\": {\n \"ID\": 13635,\n \"Learn\": 155,\n \"Yellow\": 175,\n \"Green\": 195,\n \"Grey\": 215,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Small Glowing Shard\": 1,\n\t\t\t\"Soul Dust\": 3\n }\n },\n \"Cloak - Fire Resistance\": {\n \"ID\": 13657,\n \"Learn\": 175,\n \"Yellow\": 195,\n \"Green\": 215,\n \"Grey\": 235,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Mystic Essence\": 1,\n\t\t\t\"Elemental Fire\": 1\n }\n },\n \"Cloak - Greater Defense\": {\n \"ID\": 13746,\n \"Learn\": 205,\n \"Yellow\": 225,\n \"Green\": 245,\n \"Grey\": 265,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 3\n }\n },\n \"Cloak - Greater Resistance\": {\n \"ID\": 20014,\n \"Learn\": 265,\n \"Yellow\": 285,\n \"Green\": 305,\n \"Grey\": 325,\n \"Source\": \"Drop\",\n \"RecipeID\": 16216,\n \"Reagents\": {\n\t\t\t\"Lesser Eternal Essence\": 
2,\n\t\t\t\"Heart of Fire\": 1,\n\t\t\t\"Core of Earth\": 1,\n\t\t\t\"Globe of Water\": 1,\n\t\t\t\"Breath of Wind\": 1,\n\t\t\t\"Ichor of Undeath\": 1\n }\n },\n \"Cloak - Lesser Agility\": {\n \"ID\": 13882,\n \"Learn\": 225,\n \"Yellow\": 245,\n \"Green\": 265,\n \"Grey\": 285,\n \"Source\": \"Drop\",\n \"RecipeID\": 11206,\n \"Reagents\": {\n\t\t\t\"Lesser Nether Essence\": 2\n }\n },\n \"Cloak - Lesser Fire Resistance\": {\n \"ID\": 7861,\n \"Learn\": 125,\n \"Yellow\": 150,\n \"Green\": 170,\n \"Grey\": 190,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Fire Oil\": 1,\n\t\t\t\"Lesser Astral Essence\": 1\n }\n },\n \"Cloak - Lesser Protection\": {\n \"ID\": 13421,\n \"Learn\": 115,\n \"Yellow\": 140,\n \"Green\": 160,\n \"Grey\": 180,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 6,\n\t\t\t\"Small Glimmering Shard\": 1\n }\n },\n \"Cloak - Lesser Shadow Resistance\": {\n \"ID\": 13522,\n \"Learn\": 135,\n \"Yellow\": 160,\n \"Green\": 180,\n \"Grey\": 200,\n \"Source\": \"Drop\",\n \"RecipeID\": 11098,\n \"Reagents\": {\n\t\t\t\"Greater Astral Essence\": 1,\n\t\t\t\"Shadow Protection Potion\": 1\n }\n },\n \"Cloak - Minor Agility\": {\n \"ID\": 13419,\n \"Learn\": 110,\n \"Yellow\": 135,\n \"Green\": 155,\n \"Grey\": 175,\n \"Source\": \"Vendor\",\n \"RecipeID\": 11039,\n \"Reagents\": {\n\t\t\t\"Lesser Astral Essence\": 1\n }\n },\n \"Cloak - Minor Protection\": {\n \"ID\": 7771,\n \"Learn\": 70,\n \"Yellow\": 110,\n \"Green\": 130,\n \"Grey\": 150,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 3,\n\t\t\t\"Greater Magic Essence\": 1\n }\n },\n \"Cloak - Minor Resistance\": {\n \"ID\": 7454,\n \"Learn\": 45,\n \"Yellow\": 95,\n \"Green\": 115,\n \"Grey\": 135,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 1,\n\t\t\t\"Lesser Magic Essence\": 2\n }\n },\n \"Cloak - Resistance\": {\n \"ID\": 13794,\n \"Learn\": 205,\n \"Yellow\": 225,\n \"Green\": 245,\n \"Grey\": 265,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Nether Essence\": 1\n }\n },\n \"Cloak - Superior Defense\": {\n \"ID\": 20015,\n \"Learn\": 285,\n \"Yellow\": 305,\n \"Green\": 325,\n \"Grey\": 345,\n \"Source\": \"Vendor\",\n \"RecipeID\": 16224,\n \"Reagents\": {\n\t\t\t\"Illusion Dust\": 8\n }\n },\n \"Gloves - Advanced Herbalism\": {\n \"ID\": 13868,\n \"Learn\": 225,\n \"Yellow\": 245,\n \"Green\": 265,\n \"Grey\": 285,\n \"Source\": \"Drop\",\n \"RecipeID\": 11205,\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 3,\n\t\t\t\"Sungrass\": 3\n }\n },\n \"Gloves - Advanded Mining\": {\n \"ID\": 13841,\n \"Learn\": 215,\n \"Yellow\": 235,\n \"Green\": 255,\n \"Grey\": 275,\n \"Source\": \"Drop\",\n \"RecipeID\": 11203,\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 3,\n\t\t\t\"Truesilver Bar\": 3\n }\n },\n \"Gloves - Agility\": {\n \"ID\": 13815,\n \"Learn\": 210,\n \"Yellow\": 230,\n \"Green\": 250,\n \"Grey\": 270,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Nether Essence\": 1,\n\t\t\t\"Vision Dust\": 1\n }\n },\n \"Gloves - Fishing\": {\n \"ID\": 13620,\n \"Learn\": 145,\n \"Yellow\": 170,\n \"Green\": 190,\n \"Grey\": 210,\n \"Source\": \"Drop\",\n \"RecipeID\": 11152,\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 1,\n\t\t\t\"Blackmouth Oil\": 3\n }\n },\n \"Gloves - Greater Agility\": {\n \"ID\": 20012,\n \"Learn\": 270,\n \"Yellow\": 290,\n \"Green\": 310,\n \"Grey\": 330,\n \"Source\": \"Drop\",\n \"RecipeID\": 16219,\n \"Reagents\": {\n\t\t\t\"Lesser Eternal Essence\": 3,\n\t\t\t\"Illusion Dust\": 3\n }\n },\n \"Gloves - 
Greater Strength\": {\n \"ID\": 20013,\n \"Learn\": 295,\n \"Yellow\": 315,\n \"Green\": 335,\n \"Grey\": 355,\n \"Source\": \"Drop\",\n \"RecipeID\": 16244,\n \"Reagents\": {\n\t\t\t\"Greater Eternal Essence\": 4,\n\t\t\t\"Illusion Dust\": 4\n }\n },\n \"Gloves - Herbalism\": {\n \"ID\": 13617,\n \"Learn\": 145,\n \"Yellow\": 170,\n \"Green\": 190,\n \"Grey\": 210,\n \"Source\": \"Drop\",\n \"RecipeID\": 11151,\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 1,\n\t\t\t\"Kingsblood\": 3\n }\n },\n \"Gloves - Mining\": {\n \"ID\": 13612,\n \"Learn\": 145,\n \"Yellow\": 170,\n \"Green\": 190,\n \"Grey\": 210,\n \"Source\": \"Drop\",\n \"RecipeID\": 11150,\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 1,\n\t\t\t\"Iron Ore\": 3\n }\n },\n \"Gloves - Minor Haste\": {\n \"ID\": 13948,\n \"Learn\": 250,\n \"Yellow\": 270,\n \"Green\": 290,\n \"Grey\": 310,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Large Radiant Shard\": 2,\n\t\t\t\"Wildvine\": 2\n }\n },\n \"Gloves - Riding Skill\": {\n \"ID\": 13947,\n \"Learn\": 250,\n \"Yellow\": 270,\n \"Green\": 290,\n \"Grey\": 310,\n \"Source\": \"Drop\",\n \"RecipeID\": 11226,\n \"Reagents\": {\n\t\t\t\"Large Radiant Shard\": 2,\n\t\t\t\"Dream Dust\": 3\n }\n },\n \"Gloves - Skinning\": {\n \"ID\": 13698,\n \"Learn\": 200,\n \"Yellow\": 220,\n \"Green\": 240,\n \"Grey\": 260,\n \"Source\": \"Drop\",\n \"RecipeID\": 11166,\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 1,\n\t\t\t\"Green Whelp Scale\": 3\n }\n },\n \"Gloves - Strength\": {\n \"ID\": 13887,\n \"Learn\": 225,\n \"Yellow\": 245,\n \"Green\": 265,\n \"Grey\": 285,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Nether Essence\": 2,\n\t\t\t\"Vision Dust\": 3\n }\n },\n \"Shield - Frost Resistance\": {\n \"ID\": 13933,\n \"Learn\": 235,\n \"Yellow\": 255,\n \"Green\": 275,\n \"Grey\": 295,\n \"Source\": \"Drop\",\n \"RecipeID\": 11224,\n \"Reagents\": {\n\t\t\t\"Large Radiant Shard\": 1,\n\t\t\t\"Frost Oil\": 1\n }\n },\n \"Shield - Greater Spirit\": {\n \"ID\": 13905,\n \"Learn\": 230,\n \"Yellow\": 250,\n \"Green\": 270,\n \"Grey\": 290,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Nether Essence\": 1,\n\t\t\t\"Dream Dust\": 2\n }\n },\n \"Shield - Greater Stamina\": {\n \"ID\": 20017,\n \"Learn\": 265,\n \"Yellow\": 285,\n \"Green\": 305,\n \"Grey\": 325,\n \"Source\": \"Vendor\",\n \"RecipeID\": 16217,\n \"Reagents\": {\n\t\t\t\"Dream Dust\": 10\n }\n },\n \"Shield - Lesser Block\": {\n \"ID\": 13689,\n \"Learn\": 195,\n \"Yellow\": 215,\n \"Green\": 235,\n \"Grey\": 255,\n \"Source\": \"Drop\",\n \"RecipeID\": 11168,\n \"Reagents\": {\n\t\t\t\"Greater Mystic Essence\": 2,\n\t\t\t\"Vision Dust\": 2,\n\t\t\t\"Large Glowing Shard\": 1\n }\n },\n \"Shield - Lesser Protection\": {\n \"ID\": 13464,\n \"Learn\": 115,\n \"Yellow\": 140,\n \"Green\": 160,\n \"Grey\": 180,\n \"Source\": \"Drop\",\n \"RecipeID\": 11081,\n \"Reagents\": {\n\t\t\t\"Lesser Astral Essence\": 1,\n\t\t\t\"Strange Dust\": 1,\n\t\t\t\"Small Glimmering Shard\": 1\n }\n },\n \"Shield - Lesser Spirit\": {\n \"ID\": 13485,\n \"Learn\": 130,\n \"Yellow\": 155,\n \"Green\": 175,\n \"Grey\": 195,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Astral Essence\": 2,\n\t\t\t\"Strange Dust\": 4\n }\n },\n \"Shield - Lesser Stamina\": {\n \"ID\": 13631,\n \"Learn\": 155,\n \"Yellow\": 175,\n \"Green\": 195,\n \"Grey\": 215,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Mystic Essence\": 1,\n\t\t\t\"Soul Dust\": 1\n }\n },\n \"Shield - Minor Stamina\": {\n \"ID\": 13378,\n \"Learn\": 
105,\n \"Yellow\": 130,\n \"Green\": 150,\n \"Grey\": 170,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Lesser Astral Essence\": 1,\n\t\t\t\"Strange Dust\": 2\n }\n },\n \"Shield - Spirit\": {\n \"ID\": 13659,\n \"Learn\": 180,\n \"Yellow\": 200,\n \"Green\": 220,\n \"Grey\": 240,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Mystic Essence\": 1,\n\t\t\t\"Vision Dust\": 1\n }\n },\n \"Shield - Stamina\": {\n \"ID\": 13817,\n \"Learn\": 210,\n \"Yellow\": 230,\n \"Green\": 250,\n \"Grey\": 270,\n \"Source\": \"Drop\",\n \"RecipeID\": 11202,\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 5\n }\n },\n \"Shield - Superior Spirit\": {\n \"ID\": 20016,\n \"Learn\": 280,\n \"Yellow\": 300,\n \"Green\": 320,\n \"Grey\": 340,\n \"Source\": \"Drop\",\n \"RecipeID\": 16222,\n \"Reagents\": {\n\t\t\t\"Greater Eternal Essence\": 2,\n\t\t\t\"Illusion Dust\": 4\n }\n },\n \"Weapon - Agility\": {\n \"ID\": 23800,\n \"Learn\": 290,\n \"Yellow\": 310,\n \"Green\": 330,\n \"Grey\": 350,\n \"Source\": \"Reputation\",\n \"RecipeID\": 19445,\n \"Reagents\": {\n\t\t\t\"Large Brilliant Shard\": 6,\n\t\t\t\"Greater Eternal Essence\": 6,\n\t\t\t\"Illusion Dust\": 4,\n\t\t\t\"Essence of Air\": 2\n }\n },\n \"Weapon - Demonslaying\": {\n \"ID\": 13915,\n \"Learn\": 230,\n \"Yellow\": 250,\n \"Green\": 270,\n \"Grey\": 290,\n \"Source\": \"Drop\",\n \"RecipeID\": 11208,\n \"Reagents\": {\n\t\t\t\"Small Radiant Shard\": 1,\n\t\t\t\"Dream Dust\": 2,\n\t\t\t\"Elixir of Demonslaying\": 1\n }\n },\n \"Weapon - Fiery Weapon\": {\n \"ID\": 13898,\n \"Learn\": 265,\n \"Yellow\": 285,\n \"Green\": 305,\n \"Grey\": 325,\n \"Source\": \"Drop\",\n \"RecipeID\": 11207,\n \"Reagents\": {\n\t\t\t\"Small Radiant Shard\": 4,\n\t\t\t\"Essence of Fire\": 1\n }\n },\n \"Weapon - Greater Striking\": {\n \"ID\": 13943,\n \"Learn\": 245,\n \"Yellow\": 265,\n \"Green\": 285,\n \"Grey\": 305,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Large Radiant Shard\": 2,\n\t\t\t\"Greater Nether Essence\": 2\n }\n },\n \"Weapon - Icy Chill\": {\n \"ID\": 20029,\n \"Learn\": 285,\n \"Yellow\": 305,\n \"Green\": 325,\n \"Grey\": 345,\n \"Source\": \"Drop\",\n \"RecipeID\": 16223,\n \"Reagents\": {\n\t\t\t\"Small Brilliant Shard\": 4,\n\t\t\t\"Essence of Water\": 1,\n\t\t\t\"Essence of Air\": 1,\n\t\t\t\"Icecap\": 1\n }\n },\n \"Weapon - Lesser Beastslaying\": {\n \"ID\": 13653,\n \"Learn\": 175,\n \"Yellow\": 195,\n \"Green\": 215,\n \"Grey\": 235,\n \"Source\": \"Drop\",\n \"RecipeID\": 11164,\n \"Reagents\": {\n\t\t\t\"Lesser Mystic Essence\": 1,\n\t\t\t\"Large Fang\": 2,\n\t\t\t\"Small Glowing Shard\": 1\n }\n },\n \"Weapon - Lesser Elemental Slayer\": {\n \"ID\": 13655,\n \"Learn\": 175,\n \"Yellow\": 195,\n \"Green\": 215,\n \"Grey\": 235,\n \"Source\": \"Drop\",\n \"RecipeID\": 11165,\n \"Reagents\": {\n\t\t\t\"Lesser Mystic Essence\": 1,\n\t\t\t\"Elemental Earth\": 1,\n\t\t\t\"Small Glowing Shard\": 1\n }\n },\n \"Weapon - Lesser Striking\": {\n \"ID\": 13503,\n \"Learn\": 140,\n \"Yellow\": 165,\n \"Green\": 185,\n \"Grey\": 205,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 2,\n\t\t\t\"Large Glimmering Shard\": 1\n }\n },\n \"Weapon - Minor Beastslayer\": {\n \"ID\": 7786,\n \"Learn\": 90,\n \"Yellow\": 120,\n \"Green\": 140,\n \"Grey\": 160,\n \"Source\": \"Drop\",\n \"RecipeID\": 6348,\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 4,\n\t\t\t\"Greater Magic Essence\": 2\n }\n },\n \"Weapon - Minor Striking\": {\n \"ID\": 7788,\n \"Learn\": 90,\n \"Yellow\": 120,\n \"Green\": 140,\n \"Grey\": 160,\n 
\"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 2,\n\t\t\t\"Greater Magic Essence\": 1,\n\t\t\t\"Small Glimmering Shard\": 1\n }\n },\n \"Weapon - Strength\": {\n \"ID\": 23799,\n \"Learn\": 290,\n \"Yellow\": 310,\n \"Green\": 330,\n \"Grey\": 350,\n \"Source\": \"Reputation\",\n \"RecipeID\": 19444,\n \"Reagents\": {\n\t\t\t\"Large Brilliant Shard\": 6,\n\t\t\t\"Greater Eternal Essence\": 6,\n\t\t\t\"Illusion Dust\": 4,\n\t\t\t\"Essence of Earth\": 2\n }\n },\n \"Weapon - Striking\": {\n \"ID\": 13693,\n \"Learn\": 195,\n \"Yellow\": 215,\n \"Green\": 235,\n \"Grey\": 255,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Greater Mystic Essence\": 2,\n\t\t\t\"Large Glowing Shard\": 1\n }\n },\n \"Weapon - Unholy Weapon\": {\n \"ID\": 20033,\n \"Learn\": 295,\n \"Yellow\": 315,\n \"Green\": 335,\n \"Grey\": 355,\n \"Source\": \"Drop\",\n \"RecipeID\": 16248,\n\t\t\"Reagents\": {\n\t\t\t\"Large Brilliant Shard\": 4,\n\t\t\t\"Essence of Undeath\": 4\n }\n },\n \"Weapon - Winters Might\": {\n \"ID\": 21931,\n \"Learn\": 190,\n \"Yellow\": 210,\n \"Green\": 230,\n \"Grey\": 250,\n \"Source\": \"Drop\",\n \"RecipeID\": 17725,\n \"Reagents\": {\n\t\t\t\"Greater Mystic Essence\": 3,\n\t\t\t\"Vision Dust\": 3,\n\t\t\t\"Large Glowing Shard\": 1,\n\t\t\t\"Wintersbite\": 2\n }\n },\n \"Enchanted Leather\": {\n \"ID\": 17181,\n \"Learn\": 250,\n \"Yellow\": 250,\n \"Green\": 255,\n \"Grey\": 260,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Rugged Leather\": 1,\n\t\t\t\"Lesser Eternal Essence\": 1\n }\n },\n \"Enchanted Thorium\": {\n \"ID\": 17180,\n \"Learn\": 250,\n \"Yellow\": 250,\n \"Green\": 255,\n \"Grey\": 260,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Thorium Bar\": 1,\n\t\t\t\"Dream Dust\": 3\n }\n },\n \"Greater Magic Wand\": {\n \"ID\": 14807,\n \"Learn\": 70,\n \"Yellow\": 110,\n \"Green\": 130,\n \"Grey\": 150,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Simple Wood\": 1,\n\t\t\t\"Greater Magic Essence\": 1\n }\n },\n \"Greater Mystic Wand\": {\n \"ID\": 14810,\n \"Learn\": 175,\n \"Yellow\": 195,\n \"Green\": 215,\n \"Grey\": 235,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Star Wood\": 1,\n\t\t\t\"Greater Mystic Essence\": 1,\n\t\t\t\"Vision Dust\": 1\n }\n },\n \"Lesser Magic Wand\": {\n \"ID\": 14293,\n \"Learn\": 10,\n \"Yellow\": 75,\n \"Green\": 95,\n \"Grey\": 115,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Simple Wood\": 1,\n\t\t\t\"Lesser Magic Essence\": 1\n }\n },\n \"Lesser Mana Oil\": {\n \"ID\": 25127,\n \"Learn-PHASE-5\": 250,\n \"Yellow\": 260,\n \"Green\": 270,\n \"Grey\": 280,\n \"Source\": \"Vendor\",\n \"RecipeID\": 20754,\n \"Reagents\": {\n\t\t\t\"Dream Dust\": 3,\n\t\t\t\"Purple Lotus\": 2,\n\t\t\t\"Crystal Vial\": 1\n }\n },\n \"Lesser Mystic Wand\": {\n \"ID\": 14809,\n \"Learn\": 155,\n \"Yellow\": 175,\n \"Green\": 195,\n \"Grey\": 215,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Star Wood\": 1,\n\t\t\t\"Lesser Mystic Essence\": 1,\n\t\t\t\"Soul Dust\": 1\n }\n },\n \"Lesser Wizard Oil\": {\n \"ID\": 25126,\n \"Learn-PHASE-5\": 200,\n \"Yellow\": 210,\n \"Green\": 220,\n \"Grey\": 230,\n \"Source\": \"Vendor\",\n \"RecipeID\": 20753,\n \"Reagents\": {\n\t\t\t\"Vision Dust\": 3,\n\t\t\t\"Stranglethorn Seed\": 2,\n\t\t\t\"Leaded Vial\": 1\n }\n },\n \"Minor Mana Oil\": {\n \"ID\": 25125,\n \"Learn-PHASE-5\": 150,\n \"Yellow\": 160,\n \"Green\": 170,\n \"Grey\": 180,\n \"Source\": \"Vendor\",\n \"RecipeID\": 20752,\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 3,\n\t\t\t\"Maple 
Seed\": 2,\n\t\t\t\"Leaded Vial\": 1\n }\n },\n \"Minor Mana Oil\": {\n \"ID\": 25125,\n \"Learn-PHASE-5\": 150,\n \"Yellow\": 160,\n \"Green\": 170,\n \"Grey\": 180,\n \"Source\": \"Vendor\",\n \"RecipeID\": 20752,\n \"Reagents\": {\n\t\t\t\"Soul Dust\": 3,\n\t\t\t\"Maple Seed\": 2,\n\t\t\t\"Leaded Vial\": 1\n }\n },\n \"Minor Wizard Oil\": {\n \"ID\": 25124,\n \"Learn-PHASE-5\": 45,\n \"Yellow\": 55,\n \"Green\": 65,\n \"Grey\": 75,\n \"Source\": \"Vendor\",\n \"RecipeID\": 20758,\n \"Reagents\": {\n\t\t\t\"Strange Dust\": 2,\n\t\t\t\"Maple Seed\": 1,\n\t\t\t\"Empty Vial\": 1\n }\n },\n \"Runed Arcanite Rod\": {\n \"ID\": 20051,\n \"Learn\": 290,\n \"Yellow\": 310,\n \"Green\": 330,\n \"Grey\": 350,\n \"Source\": \"VendorLimited\",\n \"RecipeID\": 16243,\n \"Reagents\": {\n\t\t\t\"Arcanite Rod\": 1,\n \"Golden Pearl\": 1,\n \"Illusion Dust\": 10,\n \"Greater Eternal Essence\": 4,\n \"Small Brilliant Shard\": 4,\n \"Large Brilliant Shard\": 2\n }\n },\n \"Runed Copper Rod\": {\n \"ID\": 7421,\n \"Learn\": 1,\n \"Yellow\": 5,\n \"Green\": 7,\n \"Grey\": 10,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Copper Rod\": 1,\n \"Strange Dust\": 1,\n \"Lesser Magic Essence\": 1\n }\n },\n \"Runed Golden Rod\": {\n \"ID\": 13628,\n \"Learn\": 150,\n \"Yellow\": 175,\n \"Green\": 195,\n \"Grey\": 215,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Golden Rod\": 1,\n \"Iridescent Pearl\": 1,\n \"Greater Astral Essence\": 2,\n \"Soul Dust\": 2\n }\n },\n \"Runed Silver Rod\": {\n \"ID\": 7795,\n \"Learn\": 100,\n \"Yellow\": 130,\n \"Green\": 150,\n \"Grey\": 170,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Silver Rod\": 1,\n \"Strange Dust\": 6,\n \"Greater Magic Essence\": 3,\n \"Shadowgem\": 1\n }\n },\n \"Runed Truesilver Rod\": {\n \"ID\": 13702,\n \"Learn\": 200,\n \"Yellow\": 220,\n \"Green\": 240,\n \"Grey\": 260,\n \"Source\": \"Trainer\",\n \"Reagents\": {\n\t\t\t\"Truesilver Rod\": 1,\n \"Black Pearl\": 1,\n \"Greater Mystic Essence\": 2,\n \"Vision Dust\": 2\n }\n },\n \"Smoking Heart of the Mountain\": {\n \"ID\": 15596,\n \"Learn\": 265,\n \"Yellow\": 285,\n \"Green\": 305,\n \"Grey\": 325,\n \"Source\": \"Drop\",\n \"RecipeID\": 11813,\n \"Reagents\": {\n\t\t\t\"Blood of the Mountain\": 1,\n\t\t\t\"Essence of Fire\": 1,\n\t\t\t\"Small Brilliant Shard\": 3\n }\n },\n \"Wizard Oil\": {\n \"ID\": 25128,\n \"Learn-PHASE-5\": 275,\n \"Yellow\": 285,\n \"Green\": 295,\n \"Grey\": 305,\n \"Source\": \"Vendor\",\n \"RecipeID\": 20755,\n \"Reagents\": {\n\t\t\t\"Illusion Dust\": 3,\n\t\t\t\"Firebloom\": 2,\n\t\t\t\"Crystal Vial\": 1\n }\n }\n}\n\nimport json\nwith open('enchanting.json', 'w') as jsonFile:\n json.dump(recipes, jsonFile)", "sub_path": "app/data/exportEnchanting.py", "file_name": "exportEnchanting.py", "file_ext": "py", "file_size_in_byte": 38289, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.dump", "line_number": 1636, "usage_type": "call"}]} +{"seq_id": "114874687", "text": "\"\"\"\r\nAuthor : Asif Khan M Pathan\r\nTest Automation Framework\r\n\"\"\"\r\n\r\nimport json\r\n\r\n\r\nclass ConfigModule:\r\n \"\"\"\r\n Configuration Module\r\n \"\"\"\r\n def __init__(self, filename):\r\n self.config_file = filename\r\n self.config_data = None\r\n\r\n def get_config_parameters(self):\r\n \"\"\"\r\n Read the Config Data from Json file\r\n :return: None\r\n \"\"\"\r\n with open(self.config_file, ) as file_ptr:\r\n self.config_data = json.load(file_ptr)\r\n\r\n\r\nif __name__ == 
\"__main__\":\r\n print(\"Config Module Class\")\r\n cm = ConfigModule('deviceconfig.json')\r\n cm.get_config_parameters()\r\n print(\"Config Data Value is \", cm.config_data)\r\n", "sub_path": "config/configmodule.py", "file_name": "configmodule.py", "file_ext": "py", "file_size_in_byte": 687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "json.load", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "73522787", "text": "# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Tech-Receptives Solutions Pvt. Ltd.\n# Copyright (C) 2009-TODAY Tech-Receptives().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see .\n#\n###############################################################################\n\nimport calendar\nimport datetime\nimport pytz\nimport time\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass accrGenerateSession(models.TransientModel):\n _name = \"accr.generate.time.table\"\n _description = \"Generate Sessions\"\n\n time_table_lines = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines')\n time_table_lines_1 = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines1',\n domain=[('day', '=', '0')])\n time_table_lines_2 = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines2',\n domain=[('day', '=', '1')])\n time_table_lines_3 = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines3',\n domain=[('day', '=', '2')])\n time_table_lines_4 = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines4',\n domain=[('day', '=', '3')])\n time_table_lines_5 = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines5',\n domain=[('day', '=', '4')])\n time_table_lines_6 = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines6',\n domain=[('day', '=', '5')])\n time_table_lines_7 = fields.One2many(\n 'accr.gen.time.table.line', 'gen_time_table', 'Time Table Lines7',\n domain=[('day', '=', '6')])\n start_date = fields.Date(\n 'Start Date', required=True, default=time.strftime('%Y-%m-01'))\n end_date = fields.Date('End Date', required=True)\n section = fields.Many2one('x_student_residential_sections', 'Section', required=True)\n\n @api.constrains('start_date', 'end_date')\n def check_dates(self):\n start_date = fields.Date.from_string(self.start_date)\n end_date = fields.Date.from_string(self.end_date)\n if start_date > end_date:\n raise ValidationError(_(\"End Date cannot be set before \\\n Start Date.\"))\n\n @api.multi\n def act_gen_time_table(self):\n for session in self:\n start_date = session.start_date\n end_date = session.end_date\n\n for n in range((end_date - start_date).days + 1):\n curr_date = start_date + datetime.timedelta(n)\n for 
line in session.time_table_lines:\n if int(line.day) == curr_date.weekday():\n hour = line.timing_id.hour\n if line.timing_id.am_pm == 'pm' and int(hour) != 12:\n hour = int(hour) + 12\n per_time = '%s:%s:00' % (hour, line.timing_id.minute)\n final_date = datetime.datetime.strptime(\n curr_date.strftime('%Y-%m-%d ') +\n per_time, '%Y-%m-%d %H:%M:%S')\n local_tz = pytz.timezone(\n self.env.user.partner_id.tz or 'GMT')\n local_dt = local_tz.localize(final_date, is_dst=None)\n utc_dt = local_dt.astimezone(pytz.utc)\n utc_dt = utc_dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n curr_start_date = datetime.datetime.strptime(\n utc_dt, \"%Y-%m-%d %H:%M:%S\")\n curr_end_date = curr_start_date + datetime.timedelta(\n hours=line.timing_id.duration)\n self.env['accr.session'].create({\n 'timing_id': line.timing_id.id,\n 'start_datetime':\n curr_start_date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'end_datetime':\n curr_end_date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'type': calendar.day_name[int(line.day)],\n 'section': session.section.id,\n 'color': 4,\n })\n return {'type': 'ir.actions.act_window_close'}\n\n\nclass accrGenerateSessionLine(models.TransientModel):\n _name = 'accr.gen.time.table.line'\n _description = 'Generate Time Table Lines'\n _rec_name = 'day'\n\n gen_time_table = fields.Many2one(\n 'accr.generate.time.table', 'Time Table', required=True)\n timing_id = fields.Many2one('accr.timing', 'Timing', required=True)\n timint_hour = fields.Selection(related='timing_id.hour', string=u'Hour', readonly=True, store=False, )\n timint_minute = fields.Selection(related='timing_id.minute', string=u'Minute', readonly=True, store=False, )\n timint_am_pm = fields.Selection(related='timing_id.am_pm', string=u'AP / PM', readonly=True, store=False, )\n timint_duration = fields.Float(related='timing_id.duration', string=u'Duration', readonly=True, store=False, )\n timing_type = fields.Selection(related='timing_id.timing_type', string=u'Type', readonly=True, store=False, )\n day = fields.Selection([\n ('0', calendar.day_name[0]),\n ('1', calendar.day_name[1]),\n ('2', calendar.day_name[2]),\n ('3', calendar.day_name[3]),\n ('4', calendar.day_name[4]),\n ('5', calendar.day_name[5]),\n ('6', calendar.day_name[6]),\n ], 'Day', required=True)\n", "sub_path": "accr_scheduler/wizard/accr_generate_timetable.py", "file_name": "accr_generate_timetable.py", "file_ext": "py", "file_size_in_byte": 6290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "odoo.models.TransientModel", "line_number": 31, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 31, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 35, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 35, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 37, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 37, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 40, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 40, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 43, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 43, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 46, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 46, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 49, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 49, "usage_type": 
"name"}, {"api_name": "odoo.fields.One2many", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 52, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 55, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 55, "usage_type": "name"}, {"api_name": "odoo.fields.Date", "line_number": 58, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 58, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 59, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 60, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 60, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 61, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 61, "usage_type": "name"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 65, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 65, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 65, "usage_type": "name"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 66, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 66, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 66, "usage_type": "name"}, {"api_name": "odoo.exceptions.ValidationError", "line_number": 68, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 68, "usage_type": "call"}, {"api_name": "odoo.api.constrains", "line_number": 63, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 63, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 88, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 91, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 95, "usage_type": "call"}, {"api_name": "calendar.day_name", "line_number": 103, "usage_type": "attribute"}, {"api_name": "odoo.api.multi", "line_number": 71, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 71, "usage_type": "name"}, {"api_name": "odoo.models.TransientModel", "line_number": 110, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 110, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 115, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 115, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 117, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 117, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 118, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 118, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 119, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 119, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 120, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 120, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 121, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 121, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", 
"line_number": 122, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 122, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 123, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 123, "usage_type": "name"}, {"api_name": "calendar.day_name", "line_number": 124, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 125, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 126, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 127, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 128, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 129, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "320124812", "text": "from flask import Blueprint, render_template, request, redirect, url_for\n\napp = Blueprint('app', __name__)\n\nfrom models import Post, Category\nfrom app import db\n\n\n@app.route('/')\ndef index():\n posts = Post.query.all()\n return render_template(\"index.html\", posts=posts)\n\n\n@app.route('/addpost', methods=['POST', 'GET'])\ndef add():\n content = request.form['content']\n category = Category(request.form['category'])\n\n post = Post(content, category)\n db.session.add(post)\n db.session.commit()\n\n return redirect(url_for('app.index'))", "sub_path": "routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "flask.Blueprint", "line_number": 3, "usage_type": "call"}, {"api_name": "models.Post.query.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Post.query", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "app.route", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Category", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Post", "line_number": 20, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 21, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 21, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 21, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 22, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 24, "usage_type": "call"}, {"api_name": "app.route", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "426281882", "text": "import pygame\nfrom pygame.color import THECOLORS\npygame.init()\ns = pygame.display.set_mode([600,1000])\ns.fill('antiquewhite')\nx = 300\ny = 100\nxx = 300\nyy = 800\npygame.display.set_caption('Python Pong')\npygame.font.SysFont('Arial', '18')\nload = pygame.font.Font.render('Loading, please wait...', True, white, 
None)\ns.blit(load, [300,100])\npygame.display.flip()\nballfd = pygame.image.load('pypongball.gif')\ns.blit(ballfd, [x,y]) \npygame.display.flip()\nhitfd = pygame.image.load('pyponghitter.gif')\ns.blit(hitfd, [xx,yy])\npygame.display.flip() \npygame.draw.rect(s, white, [270, 330, 60, 170], 0)\npygame.display.flip()\npygame.mixer.Sound('w2pypong.wav')\npygame.mixer.Sound.play(0, 0, 0)\nlives = 3\ndef events(self, event):\n pygame.event.get()\nwhile lives >= 3:\n for event in events:\n if event.type == K_LEFT:\n pygame.draw.rect(s, 'antiquewhite', [xx, xx + 100, yy, yy - 100], 0)\n xx = xx - 50\n s.blit(hitfd, [xx, yy])\n pygame.display.flip()\n if event.type == K_RIGHT:\n pygame.draw.rect(s,'antiquewhite', [xx, xx + 100, yy, yy - 100], 0)\n xx = xx + 50\n s.blit(hitfd, [xx, yy])\n pygame.display.flip()\n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "pypong.py", "file_name": "pypong.py", "file_ext": "py", "file_size_in_byte": 1294, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.font.Font.render", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 31, "usage_type": "attribute"}, 
{"api_name": "pygame.display.flip", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "277391520", "text": "import torch\nimport torch.nn as nn\nimport time\n\nglobal lastlog\nlastlog = time.time()\n\n\ndef mylog(content, cls=\"DEBUG\"):\n now = time.time()\n global lastlog\n print(\"{cls}: {content}, interval:{interval}, from device {device}\".format(\n cls=cls, content=content, interval=now - lastlog, device=torch.cuda.current_device()))\n lastlog = now\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0, apply_relu=True):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,\n stride=stride, padding=padding, bias=False) # verify bias false\n self.bn = nn.BatchNorm2d(out_planes)\n self.apply_relu = apply_relu\n if self.apply_relu:\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n if self.apply_relu:\n x = self.relu(x)\n return x\n\n\nclass Inception(nn.Module):\n\n def __init__(self, in_planes, out_planes, branches, reduce=False):\n super(Inception, self).__init__()\n assert(type(branches) == type([]) and len(branches)==3)\n stride = 1\n if reduce:\n stride = 2\n self.projection = (in_planes != out_planes)\n if self.projection:\n self.branch0 = BasicConv2d(\n in_planes, out_planes, kernel_size=1, stride=stride, apply_relu=False)\n\n self.branch1 = BasicConv2d(in_planes, branches[0], kernel_size=1, stride=stride)\n\n self.branch2 = nn.Sequential(\n BasicConv2d(in_planes, branches[1], kernel_size=1, stride=1),\n BasicConv2d(branches[1], branches[1],\n kernel_size=3, stride=stride, padding=1),\n )\n\n self.branch3 = nn.Sequential(\n BasicConv2d(in_planes, branches[2], kernel_size=1, stride=1),\n BasicConv2d(branches[2], branches[2], kernel_size=3, stride=1, padding=1),\n BasicConv2d(branches[2], branches[2],\n kernel_size=3, stride=stride, padding=1),\n )\n\n self.conv = BasicConv2d(sum(branches), out_planes, kernel_size=1,\n stride=1, apply_relu=False)\n self.bn = nn.BatchNorm2d(out_planes, momentum=0.05)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x0 = x\n if self.projection:\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n out = torch.cat((x1, x2, x3), 1)\n out = self.bn(self.conv(out))\n out = self.relu(out + x0)\n return out\n\n\nclass IR18(nn.Module):\n\n def __init__(self, num_classes=2):\n super(IR18, self).__init__()\n # TODO preprocess 是新增的处理。因为输入是3x448x448的数据\n self.preprocess = nn.Sequential(\n nn.Conv2d(3, 3, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(3),\n nn.ReLU(inplace=True)\n )\n self.stage1 = nn.Sequential(\n BasicConv2d(3, 24, kernel_size=3, stride=2, padding=1),\n BasicConv2d(24, 12, kernel_size=3, stride=1, padding=1),\n BasicConv2d(12, 96, kernel_size=3, stride=2, padding=1),\n nn.MaxPool2d(3, stride=2, ceil_mode=True),\n Inception(96, 384, [24, 48, 48]),\n Inception(384, 384, [24, 48, 48]),\n )\n self.stage2 = nn.Sequential(\n Inception(384, 96, [24, 48, 48], reduce=True),\n Inception(96, 96, [24, 48, 48]),\n )\n self.stage3 = nn.Sequential(\n 
Inception(96, 384, [48, 96, 96], reduce=True),\n Inception(384, 384, [96, 192, 192]),\n )\n self.stage4 = nn.Sequential(\n nn.AvgPool2d(3, stride=2, ceil_mode=True),\n nn.ReLU(inplace=True),\n nn.AvgPool2d(3, stride=1, ceil_mode=True),\n )\n self.stage5 = nn.Sequential(\n nn.Linear(384, 196),\n nn.ReLU(inplace=True),\n nn.Linear(196, 100),\n nn.ReLU(inplace=True),\n nn.Linear(100, num_classes),\n )\n\n def forward(self, x):\n x = self.preprocess(x)\n x = self.stage1(x)\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.stage4(x)\n x = x.view(x.size(0), -1)\n x = self.stage5(x)\n return x\n\n\ndef test():\n model = IR18()\n model.eval()\n inputs = torch.autograd.Variable(torch.zeros(2, 3, 224, 224))\n mylog(\"Testing\")\n out4 = model(inputs)\n print(out4)\n\n\nif __name__ == '__main__':\n test()\n", "sub_path": "1channel_access_check/ir18.py", "file_name": "ir18.py", "file_ext": "py", "file_size_in_byte": 4644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "time.time", "line_number": 6, "usage_type": "call"}, {"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda.current_device", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, 
{"api_name": "torch.nn.MaxPool2d", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "351649329", "text": "import nengo\nfrom nengo.builder.learning_rules import SimVoja, SimOja, SimBCM\nimport numpy as np\nimport pytest\n\n\n@pytest.mark.parametrize(\"rule\", (nengo.Voja, nengo.Oja, nengo.BCM))\ndef test_merged_learning(Simulator, rule, seed):\n # a slightly more complicated network with mergeable learning rules, to\n # make sure that works OK\n dimensions = 2\n with nengo.Network(seed=seed) as net:\n a = nengo.Ensemble(3, dimensions)\n b = nengo.Ensemble(3, dimensions)\n c = nengo.Ensemble(5, dimensions)\n d = nengo.Ensemble(10, dimensions)\n\n conn0 = nengo.Connection(\n a, c, learning_rule_type=rule(),\n solver=nengo.solvers.LstsqL2(weights=rule != nengo.Voja))\n conn1 = nengo.Connection(\n b, d, learning_rule_type=rule(),\n solver=nengo.solvers.LstsqL2(weights=rule != nengo.Voja))\n\n p0 = nengo.Probe(conn0.learning_rule, \"delta\")\n p1 = nengo.Probe(conn1.learning_rule, \"delta\")\n\n with nengo.Simulator(net) as sim:\n sim.run_steps(10)\n\n canonical = (sim.data[p0], sim.data[p1])\n\n with Simulator(net) as sim:\n build_type = {nengo.Voja: SimVoja, nengo.Oja: SimOja,\n nengo.BCM: SimBCM}\n\n assert len([x for x in sim.tensor_graph.plan\n if type(x[0]) == build_type[rule]]) == 1\n\n sim.run_steps(10)\n\n assert np.allclose(sim.data[p0], canonical[0])\n assert np.allclose(sim.data[p1], canonical[1])\n", "sub_path": "nengo_dl/tests/test_learning_rules.py", "file_name": 
"test_learning_rules.py", "file_ext": "py", "file_size_in_byte": 1481, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "nengo.Network", "line_number": 12, "usage_type": "call"}, {"api_name": "nengo.Ensemble", "line_number": 13, "usage_type": "call"}, {"api_name": "nengo.Ensemble", "line_number": 14, "usage_type": "call"}, {"api_name": "nengo.Ensemble", "line_number": 15, "usage_type": "call"}, {"api_name": "nengo.Ensemble", "line_number": 16, "usage_type": "call"}, {"api_name": "nengo.Connection", "line_number": 18, "usage_type": "call"}, {"api_name": "nengo.solvers.LstsqL2", "line_number": 20, "usage_type": "call"}, {"api_name": "nengo.solvers", "line_number": 20, "usage_type": "attribute"}, {"api_name": "nengo.Voja", "line_number": 20, "usage_type": "attribute"}, {"api_name": "nengo.Connection", "line_number": 21, "usage_type": "call"}, {"api_name": "nengo.solvers.LstsqL2", "line_number": 23, "usage_type": "call"}, {"api_name": "nengo.solvers", "line_number": 23, "usage_type": "attribute"}, {"api_name": "nengo.Voja", "line_number": 23, "usage_type": "attribute"}, {"api_name": "nengo.Probe", "line_number": 25, "usage_type": "call"}, {"api_name": "nengo.Probe", "line_number": 26, "usage_type": "call"}, {"api_name": "nengo.Simulator", "line_number": 28, "usage_type": "call"}, {"api_name": "nengo.Voja", "line_number": 34, "usage_type": "attribute"}, {"api_name": "nengo.Oja", "line_number": 34, "usage_type": "attribute"}, {"api_name": "nengo.BCM", "line_number": 35, "usage_type": "attribute"}, {"api_name": "nengo.builder.learning_rules.SimVoja", "line_number": 34, "usage_type": "name"}, {"api_name": "nengo.builder.learning_rules.SimOja", "line_number": 34, "usage_type": "name"}, {"api_name": "nengo.builder.learning_rules.SimBCM", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 43, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 7, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 7, "usage_type": "attribute"}, {"api_name": "nengo.Voja", "line_number": 7, "usage_type": "attribute"}, {"api_name": "nengo.Oja", "line_number": 7, "usage_type": "attribute"}, {"api_name": "nengo.BCM", "line_number": 7, "usage_type": "attribute"}]} +{"seq_id": "378890898", "text": "import unittest\nfrom unittest.mock import patch, Mock, call\nfrom io import BytesIO\n\nfrom hypothesis import given\nfrom hypothesis.strategies import text, characters, integers, from_regex\n\nfrom make_prg.prg_encoder import PrgEncoder, ConversionError, EncodeError\n\n\nclass TestPrgEncoder(unittest.TestCase):\n def test_dnaToInt_empty_string_raises_assert_error(self):\n encoder = PrgEncoder()\n char = \"\"\n\n with self.assertRaises(ConversionError) as context:\n encoder._dna_to_int(char)\n\n self.assertTrue(\"Char '' is not in\" in str(context.exception))\n\n @given(text(alphabet=characters(blacklist_characters=\"ACGTacgt\")))\n def test_dnaToInt_char_not_valid_raises_assert_error(self, char):\n encoder = PrgEncoder()\n\n with self.assertRaises(ConversionError) as context:\n encoder._dna_to_int(char)\n\n self.assertTrue(\"Char '{}' is not in\".format(char) in str(context.exception))\n\n def test_dnaToInt_char_valid_returns_int(self):\n encoder = PrgEncoder()\n char = \"A\"\n\n actual = encoder._dna_to_int(char)\n expected = 1\n\n self.assertEqual(actual, expected)\n\n def 
test_dnaToInt_char_valid_but_lowercase_returns_int(self):\n encoder = PrgEncoder()\n char = \"a\"\n\n actual = encoder._dna_to_int(char)\n expected = 1\n\n self.assertEqual(actual, expected)\n\n def test_dnaToInt_char_valid_non_default_encoding(self):\n encoder = PrgEncoder(encoding={\"A\": 7})\n char = \"a\"\n\n actual = encoder._dna_to_int(char)\n expected = 7\n\n self.assertEqual(actual, expected)\n\n def test_encode_unit_empty_string_raises_error(self):\n encoder = PrgEncoder()\n unit = \"\"\n\n with self.assertRaises(EncodeError) as err:\n encoder._encode_unit(unit)\n\n self.assertTrue(\"Cannot encode an empty string\")\n\n @patch.object(PrgEncoder, \"_dna_to_int\", side_effect=[1, 2, 3, 4])\n def test_encode_unit_dna_returns_list_of_ints_between_1_and_4(\n self, mock_method: Mock\n ):\n encoder = PrgEncoder()\n unit = \"ACGT\"\n\n actual = encoder._encode_unit(unit)\n expected = [1, 2, 3, 4]\n\n self.assertEqual(actual, expected)\n\n calls = [call(c) for c in \"ACGT\"]\n mock_method.assert_has_calls(calls)\n\n self.assertEqual(mock_method.call_count, 4)\n\n @given(integers(min_value=0))\n def test_encode_unit_integer_string_returns_list_with_just_that_int(self, unit):\n encoder = PrgEncoder()\n\n actual = encoder._encode_unit(str(unit))\n expected = [unit]\n\n self.assertEqual(actual, expected)\n\n def test_encode_unit_prg_with_invalid_chars_raises_error(self):\n encoder = PrgEncoder()\n unit = \"foo\"\n\n with self.assertRaises(EncodeError) as context:\n encoder._encode_unit(unit)\n\n self.assertTrue(\n \"Unit {} contains invalid characters\".format(unit) in str(context.exception)\n )\n\n def test_encode_empty_string_returns_empty(self):\n encoder = PrgEncoder()\n prg = \"\"\n\n actual = encoder.encode(prg)\n expected = []\n\n self.assertEqual(actual, expected)\n\n def test_encode_prg_with_one_site_and_one_alt(self):\n encoder = PrgEncoder()\n prg = \" 5 6 C 5 \"\n\n actual = encoder.encode(prg)\n expected = [5, 6, 2, 5]\n\n self.assertEqual(actual, expected)\n\n def test_encode_prg_with_one_site_and_one_alt_no_spaces_at_ends(self):\n encoder = PrgEncoder()\n prg = \"5 6 C 5\"\n\n actual = encoder.encode(prg)\n expected = [5, 6, 2, 5]\n\n self.assertEqual(actual, expected)\n\n def test_encode_prg_with_one_site_and_two_alts(self):\n encoder = PrgEncoder()\n prg = \"5 A 6 C 5\"\n\n actual = encoder.encode(prg)\n expected = [5, 1, 6, 2, 5]\n\n self.assertEqual(actual, expected)\n\n def test_encode_prg_with_one_site_and_two_alts_longer_than_one_base(self):\n encoder = PrgEncoder()\n prg = \"5 GA 6 CT 5\"\n\n actual = encoder.encode(prg)\n expected = [5, 3, 1, 6, 2, 4, 5]\n\n self.assertEqual(actual, expected)\n\n def test_encode_prg_with_long_site_numbers_and_two_alts_longer_than_one_base(self):\n encoder = PrgEncoder()\n prg = \"55 GA 63 Ct 55\"\n\n actual = encoder.encode(prg)\n expected = [55, 3, 1, 63, 2, 4, 55]\n\n self.assertEqual(actual, expected)\n\n def test_encode_prg_with_space_at_start_and_only_letters(self):\n encoder = PrgEncoder()\n prg = \" a \"\n\n actual = encoder.encode(prg)\n expected = [1]\n\n self.assertEqual(actual, expected)\n\n @given(from_regex(r\" ?[0-9]* [ACGTacgt]* [0-9]* \", fullmatch=True))\n def test_encode_permutations_of_valid_input(self, prg):\n encoder = PrgEncoder()\n\n encoder.encode(prg)\n\n self.assertTrue(True)\n\n def test_write_encoding_to_empty_encoding_writes_nothing(self):\n encoding = []\n write_to = BytesIO()\n PrgEncoder.write_encoding_to(encoding, write_to)\n write_to.seek(0)\n\n self.assertEqual(write_to.read(), b\"\")\n\n def 
test_write_encoding_to_encoding_with_one_int(self):\n encoding = [1]\n write_to = BytesIO()\n PrgEncoder.write_encoding_to(encoding, write_to)\n write_to.seek(0)\n\n actual = write_to.read()\n expected = int(1).to_bytes(4, \"little\")\n\n self.assertEqual(actual, expected)\n\n def test_write_encoding_to_encoding_with_two_ints(self):\n encoding = [1, 4]\n write_to = BytesIO()\n PrgEncoder.write_encoding_to(encoding, write_to)\n write_to.seek(0)\n\n actual = write_to.read()\n expected = int(1).to_bytes(4, \"little\") + int(4).to_bytes(4, \"little\")\n\n self.assertEqual(actual, expected)\n", "sub_path": "tests/test_prg_encoder.py", "file_name": "test_prg_encoder.py", "file_ext": "py", "file_size_in_byte": 5815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 13, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.ConversionError", "line_number": 16, "usage_type": "argument"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 23, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.ConversionError", "line_number": 25, "usage_type": "argument"}, {"api_name": "hypothesis.given", "line_number": 21, "usage_type": "call"}, {"api_name": "hypothesis.strategies.text", "line_number": 21, "usage_type": "call"}, {"api_name": "hypothesis.strategies.characters", "line_number": 21, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 31, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 40, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 49, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 58, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.EncodeError", "line_number": 61, "usage_type": "argument"}, {"api_name": "unittest.mock.Mock", "line_number": 68, "usage_type": "name"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 70, "usage_type": "call"}, {"api_name": "unittest.mock.call", "line_number": 78, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 66, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 66, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 66, "usage_type": "name"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 85, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 83, "usage_type": "call"}, {"api_name": "hypothesis.strategies.integers", "line_number": 83, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 93, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.EncodeError", "line_number": 96, "usage_type": "argument"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 104, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 113, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 122, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 131, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 140, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 149, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 158, 
"usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 168, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 166, "usage_type": "call"}, {"api_name": "hypothesis.strategies.from_regex", "line_number": 166, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 176, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder.write_encoding_to", "line_number": 177, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 177, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 184, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder.write_encoding_to", "line_number": 185, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 185, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 195, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder.write_encoding_to", "line_number": 196, "usage_type": "call"}, {"api_name": "make_prg.prg_encoder.PrgEncoder", "line_number": 196, "usage_type": "name"}]} +{"seq_id": "477590072", "text": "from os import path\nfrom typing import Callable, Iterator, Optional\nfrom enum import Enum\n\nclass Position(Enum):\n FLOOR = '.'\n EMPTY_SEAT = 'L'\n OCCUPIED_SEAT = '#'\n\n def __str__(self):\n return self.value\n\nSeatLayout = list[list[Position]]\nApplySeatingRulesFn = Callable[[SeatLayout, int, int], Position]\n\ndef parse_seat_layout(input_file: str) -> SeatLayout:\n with open(input_file) as f:\n return [\n [Position(pos) for pos in line]\n for line \n in f.read().splitlines()\n ]\n\ndef print_seat_layout(seat_layout: SeatLayout):\n for seat_line in seat_layout:\n print(*seat_line, sep = '')\n\ndef apply_seating_rules_p1(seat_layout: SeatLayout, row: int, col: int) -> Position:\n nearby_occupied_seats = count_nearby_occupied_seats(seat_layout, row, col)\n \n pos = seat_layout[row][col]\n if pos == Position.EMPTY_SEAT and nearby_occupied_seats == 0:\n return Position.OCCUPIED_SEAT\n elif pos == Position.OCCUPIED_SEAT and nearby_occupied_seats >= 4:\n return Position.EMPTY_SEAT\n else:\n return pos\n\ndef get_nearby_seats(seat_layout: SeatLayout, center_row: int, center_col: int) -> Iterator[Position]:\n for row in yield_axis_var(center_row, len(seat_layout), radius = 1):\n seat_line = seat_layout[row]\n\n for col in yield_axis_var(center_col, len(seat_line), radius = 1):\n if col == center_col and row == center_row:\n continue\n else:\n yield seat_line[col]\n\ndef count_nearby_occupied_seats(seat_layout: SeatLayout, center_row: int, center_col: int) -> int:\n nearby_seats = list(get_nearby_seats(seat_layout, center_row, center_col))\n return nearby_seats.count(Position.OCCUPIED_SEAT)\n\ndef apply_seating_rules_p2(seat_layout: SeatLayout, row: int, col: int) -> Position:\n visible_occupied_seats = count_visible_occupied_seats(seat_layout, row, col)\n \n pos = seat_layout[row][col]\n if pos == Position.EMPTY_SEAT and visible_occupied_seats == 0:\n return Position.OCCUPIED_SEAT\n elif pos == Position.OCCUPIED_SEAT and visible_occupied_seats >= 5:\n return Position.EMPTY_SEAT\n else:\n return pos\n\ndef find_next_seat(seat_layout: SeatLayout, curr_row: int, curr_col: int, dr: int, dc: int) -> Optional[Position]:\n new_row = curr_row + dr\n new_col = curr_col + dc\n if 0 <= new_row < len(seat_layout):\n seat_line = seat_layout[new_row]\n if 0 <= new_col < len(seat_line):\n pos = seat_line[new_col]\n if pos != Position.FLOOR:\n return pos\n else:\n return find_next_seat(seat_layout, 
new_row, new_col, dr, dc)\n\ndef get_cardinal_visible_seats(seat_layout: SeatLayout, center_row: int, center_col: int) -> Iterator[Position]:\n for dr in [-1, 0, 1]:\n for dc in [-1, 0, 1]:\n if dr == 0 and dc == 0:\n continue\n \n pos = find_next_seat(seat_layout, center_row, center_col, dr, dc)\n if pos is not None:\n yield pos\n\ndef count_visible_occupied_seats(seat_layout: SeatLayout, center_row: int, center_col: int) -> int:\n visible_seats = list(get_cardinal_visible_seats(seat_layout, center_row, center_col))\n return visible_seats.count(Position.OCCUPIED_SEAT)\n\ndef yield_axis_var(center: int, limit: int, radius: int) -> Iterator[int]:\n if center - radius >= 0:\n yield center - radius\n \n yield center\n\n if center + radius < limit:\n yield center + radius\n\ndef simulate_seating_round(seat_layout: SeatLayout, apply_rules_fn: ApplySeatingRulesFn) -> SeatLayout:\n return [\n [\n apply_rules_fn(seat_layout, row, col)\n for col\n in range(len(seat_layout[0]))\n ]\n for row\n in range(len(seat_layout))\n ]\n\ndef simulate_seating(seat_layout: SeatLayout, apply_rules_fn: ApplySeatingRulesFn, debug: bool = False) -> SeatLayout:\n i = 0\n if debug:\n print('Iteration #', i)\n print_seat_layout(seat_layout)\n print()\n\n while True:\n new_seat_layout = simulate_seating_round(seat_layout, apply_rules_fn)\n if new_seat_layout == seat_layout:\n break\n else:\n seat_layout = new_seat_layout\n\n if debug:\n i += 1\n print('Iteration #', i)\n print_seat_layout(seat_layout)\n print()\n \n return new_seat_layout\n\ndef count_occupied_seats(seat_layout: SeatLayout) -> int:\n if seat_layout == []:\n return 0\n else:\n first_line, *other_lines = seat_layout\n return first_line.count(Position.OCCUPIED_SEAT) + count_occupied_seats(other_lines)\n\ndef solve(input_file: str) -> None:\n print(f'[{input_file}]')\n full_path = path.join(path.dirname(__file__), input_file)\n\n seat_layout = parse_seat_layout(full_path)\n final_seat_layout_p1 = simulate_seating(seat_layout, apply_seating_rules_p1)\n\n print('Part 1 answer:', count_occupied_seats(final_seat_layout_p1))\n\n final_seat_layout_p2 = simulate_seating(seat_layout, apply_seating_rules_p2)\n print('Part 2 answer:', count_occupied_seats(final_seat_layout_p2))\n print()\n\nif __name__ == '__main__':\n solve('example.txt')\n solve('input.txt')", "sub_path": "day11/day11.py", "file_name": "day11.py", "file_ext": "py", "file_size_in_byte": 5288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "enum.Enum", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 90, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "216904679", "text": "#!/usr/bin/python3\n\"\"\"\nAuthor:ZAG\nA pair of scripts for unidirectional UDP benchmarks:\n- receiver is run in while-true loop\n- sender send N packets (1M by default) of SIZE (64 bytes by default) with DELAY (0) between packets\n- Each udp packet is enumerated\n- Missed and disordered packets are treated like errors\n- Errors 
are counted on receiver side\n- each packet also contains timestamp to measure latency (receiver should be running on the same host)\n\"\"\"\n\nimport socket\nfrom struct import pack,unpack\nfrom time import perf_counter, sleep\nimport argparse\n\n\ndef sender(UDP_IP=\"127.0.0.1\", UDP_PORT=5555, N=1000000, size=64, delay=0):\n print(\"Starting sender (receiver should be already running)\")\n print(\"target ip:\",UDP_IP)\n print(\"target port:\",UDP_PORT)\n print(\"N packets:\",N)\n print(\"packet size:\",size)\n print(\"delay between packets:\", delay)\n MESSAGE = bytearray([48]*size)\n\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n while True:\n for i in range(0, N):\n sock.sendto(pack(\"I\",i) + MESSAGE, (UDP_IP, UDP_PORT))\n sleep(delay)\n #death packet\n print(\"sending death packet\")\n sock.sendto(bytearray([0]*1), (UDP_IP, UDP_PORT))\n \n print(\"Complete\")\n\ndef receiver(UDP_IP=\"0.0.0.0\", UDP_PORT=5555):\n print(\"UDP target IP:\", UDP_IP)\n print(\"UDP target port:\", UDP_PORT)\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n sock.bind((UDP_IP, UDP_PORT))\n \n while True: \n data = [] #received data\n #for i in range(0,1000000):\n n_packets = 0\n while True:\n _data, addr = sock.recvfrom(10000) # buffer size is 1024 bytes\n n_packets += 1\n \n if len(_data) > 1:\n data.append(_data[:4])\n else:\n print(\"got death packet, analyzing...\")\n break\n #end for was here\n\n distro = {}\n cur_packet = 0\n n_bad_packets = 0\n for i in range(0,n_packets-1):\n next_packet = unpack('I',data[i])[0]\n if cur_packet != 0:\n delta = next_packet - cur_packet\n else:\n delta = 1\n if delta in distro:\n distro[delta] += 1\n else:\n distro[delta] = 1\n cur_packet = next_packet\n\n print(\"total: \", n_packets, \"\\n\")\n for key in distro.keys():\n print(\"distro: \", key, ':', distro[key],\"\\t\")\n \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='A pair of scripts for unidirectional UDP benchmarks:\\\n- receiver is run in while-true loop\\\n- sender send N packets (1M by default) of SIZE (64 bytes by default, 9K max) with DELAY (0) between packets\\\n- Each udp packet is enumerated\\\n- Missed and disordered packets are treated like errors\\\n- Errors are counted on receiver side\\\n- each packet also contains timestamp to measure latency (receiver should be running on the same host)')\n parser.add_argument(\"-S\", \"--SENDER\", dest=\"SENDER\", default=False, help=\"is sender (receiver by default)\", metavar=\"SENDER\")\n parser.add_argument(\"-i\", \"--ipaddress\", dest=\"ipaddress\", default=\"127.0.0.1\", help=\"ip address\")\n parser.add_argument(\"-p\", \"--udpport\", dest=\"udpport\", default=5555, help=\"udp-ip port\")\n \n parser.add_argument(\"-n\", \"--N\", dest=\"N\", default=1000000, help=\"number of generated packets (SENDER only)\")\n parser.add_argument(\"-s\", \"--size\", dest=\"size\", default=64, help=\"default packet size in bytes, min - 10B, max - 9KB (SENDER only)\")\n parser.add_argument(\"-d\", \"--delay\", dest=\"delay\", default=0, help=\"delay between packets in us, default 0 (SENDER only)\")\n \n \n args = parser.parse_args()\n #TODO: validate args and pass to functions\n #TODO: add min-max-step for packet size and delay\n #TODO: substract \n if args.SENDER:\n sender(N=int(args.N), delay=float (args.delay), size = int(args.size))\n else:\n receiver()\n\n\n\n", "sub_path": "udp_bench.py", "file_name": "udp_bench.py", "file_ext": "py", "file_size_in_byte": 3970, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "socket.socket", "line_number": 28, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 28, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 29, "usage_type": "attribute"}, {"api_name": "struct.pack", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 43, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 43, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 44, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 66, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "332270348", "text": "import logging\nimport logging.handlers\nimport asyncio\nfrom plc_module_class import PlcModuleClass, PlcVariableTags\nfrom influxdb_module_class import InfluxDBModuleClass\nimport copy\n\n\n\n\nclass PlcLooper():\n def __init__(self, plc_client: PlcModuleClass, influxdb_client: InfluxDBModuleClass):\n self.plc_client = plc_client\n self.influx_client = influxdb_client\n\n\n async def plc_2_influx_db_variables(self, meas_dict, samplint_time):\n while True:\n try:\n # lista di task per la lettura dal plc in modo asincrono\n tasks = [self.plc_client.read_var_async(m) for m in meas_dict]\n # attesa della fine di tutti i task\n await asyncio.wait(tasks)\n # scrittura dati acquisiti su influxDB\n asyncio.gather(self.influx_client.write_all_data(meas_dict))\n except Exception as e:\n logging.critical(f'::error: {e}::')\n # tempo di campionamento dal plc\n await asyncio.sleep(samplint_time)\n\n\n########################################################################################################################\n### MAIN STARTER\n\n def start_testing(self):\n # avvio lettura da database e scrittura su plc\n #asyncio.run(self.mssql_2_plc_variables())\n if len(self.plc_client.measurement_list_dict) <=0 :\n return\n loop = asyncio.get_event_loop()\n for group in self.plc_client.measurement_list_dict:\n sampling_time=group[PlcVariableTags.R_SAMPLING_TIME_MS.name]/1000\n measurements=group[PlcVariableTags.MEASUREMENTS.name]\n loop.create_task(self.plc_2_influx_db_variables(meas_dict=copy.deepcopy(measurements), samplint_time=copy.deepcopy(sampling_time)))\n loop.run_forever()\n \n\n\n \n", "sub_path": "Modbus2DBs/PLC_Schneider_Modbus_2_Influx/plc_reader/plc_looper.py", "file_name": "plc_looper.py", "file_ext": "py", "file_size_in_byte": 1839, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "plc_module_class.PlcModuleClass", "line_number": 12, "usage_type": "name"}, {"api_name": "influxdb_module_class.InfluxDBModuleClass", "line_number": 12, "usage_type": "name"}, {"api_name": "asyncio.wait", "line_number": 23, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 27, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 40, "usage_type": "call"}, {"api_name": "plc_module_class.PlcVariableTags.R_SAMPLING_TIME_MS", "line_number": 42, "usage_type": "attribute"}, {"api_name": "plc_module_class.PlcVariableTags", "line_number": 42, "usage_type": "name"}, {"api_name": 
"plc_module_class.PlcVariableTags.MEASUREMENTS", "line_number": 43, "usage_type": "attribute"}, {"api_name": "plc_module_class.PlcVariableTags", "line_number": 43, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "203374943", "text": "import json\nimport os\n\nimport logging\n\nimport sys\nimport textwrap\nimport threading\n\nimport time\nimport tweepy\nimport tweepy.error\n\nfrom ..School import School\nfrom .NotificationDispatcher import NotificationDispatcher\n\n\nclass TweetQueue(threading.Thread):\n\n def __init__(self, dispatcher):\n super(TweetQueue, self).__init__()\n self.dispatcher = dispatcher\n self.queue = []\n\n def run(self):\n while True:\n if len(self.queue) > 0:\n tweet = self.queue.pop(0)\n self.dispatcher.api.update_status(tweet)\n time.sleep(60)\n\n\nclass TwitterDispatcher(NotificationDispatcher):\n\n dispatcher_name = \"TWITTER\"\n\n def __init__(self):\n self.logger = logging.getLogger(\"TwitterDispatcher\")\n self.logger.debug(\"Loading Twitter config info...\")\n if os.path.isfile(os.path.join(os.getcwd(), \"twitter_config.json\")):\n with open(os.path.join(os.getcwd(), \"twitter_config.json\"), \"r\") as f:\n self.config = json.load(f)\n else:\n self.logger.error(\"twitter_config.json not found, creating...\")\n self.config = {\n \"CONSUMER_KEY\": \"\",\n \"CONSUMER_SECRET\": \"\",\n \"ACCESS_KEY\": \"\",\n \"ACCESS_SECRET\": \"\",\n \"TWEET_FORMAT\": \"Status for {twitter} updated. New status: {status} #nlschools\"\n }\n with open(os.path.join(os.getcwd(), \"twitter_config.json\"), \"w\") as f:\n json.dump(self.config, f, indent=4, sort_keys=True)\n\n try:\n self.logger.debug(\"Authenticating with Twitter...\")\n self.auth = tweepy.OAuthHandler(self.config[\"CONSUMER_KEY\"], self.config[\"CONSUMER_SECRET\"])\n self.auth.set_access_token(self.config[\"ACCESS_KEY\"], self.config[\"ACCESS_SECRET\"])\n self.api = tweepy.API(self.auth)\n if self.api.verify_credentials():\n self.logger.debug(\"Authenticated with Twitter.\")\n except tweepy.error.TweepError:\n self.logger.error(\"Failed to authenticate with Twitter. 
Please correct the settings in \"\n \"twitter_config.json and restart.\")\n sys.exit(1)\n\n self.queue = TweetQueue(self)\n self.queue.start()\n\n def dispatch_notification(self, school: School, new_status: str):\n twitter = getattr(school, \"twitter\", school.name)\n tweet = self.config[\"TWEET_FORMAT\"].format(twitter=twitter, status=new_status)\n truncated_tweet = textwrap.shorten(tweet, 140, placeholder=\"...\")\n if not os.getenv(\"SCHOOLTRACKER_DEBUG\", False):\n self.queue.queue.append(truncated_tweet)\n else:\n self.logger.debug(truncated_tweet)\n\n\n", "sub_path": "SchoolTracker/Dispatcher/TwitterDispatcher.py", "file_name": "TwitterDispatcher.py", "file_ext": "py", "file_size_in_byte": 2751, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "threading.Thread", "line_number": 18, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "NotificationDispatcher.NotificationDispatcher", "line_number": 33, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 41, "usage_type": "call"}, {"api_name": "json.load", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 52, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 53, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 57, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 59, "usage_type": "call"}, {"api_name": "tweepy.error", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "School.School", "line_number": 70, "usage_type": "name"}, {"api_name": "textwrap.shorten", "line_number": 73, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "176657058", "text": "import sqlite3\r\n\r\ndb_locale='students.db'\r\n\r\nconnie= sqlite3.connect(db_locale)\r\nc= connie.cursor()\r\n\r\nc.execute(\"\"\"\r\nSELECT * from contact_details\r\n\"\"\")\r\n\r\nstudent_info=c.fetchall()\r\nfor student in student_info:\r\n print(student)\r\n\r\nconnie.commit()\r\nconnie.close()", "sub_path": "db_query.py", "file_name": "db_query.py", "file_ext": "py", "file_size_in_byte": 267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "616808628", "text": "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport random\n\nfrom dataloader import all_letters, TrainDataset, collate_wrapper\nfrom dataloader import MAX_LENGTH, IndexToletter, TestDataset\nfrom test_criterion import Gaussian_score, compute_bleu\nfrom parser import argparser\nimport os\n\nSOS_token = 0\nEOS_token = 1\n\ndevice = 
torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Seq2Seq_VAE(nn.Module):\n def __init__(self, voc_len=29, hidden_size=256, num_layers=2,\n latent_size=29, cond_embd_size=8,\n cond_len=4, dropout=0):\n super(Seq2Seq_VAE, self).__init__()\n\n self.cond_embd1 = nn.Embedding(cond_len, cond_embd_size)\n self.enoder = EncoderRNN(voc_len=voc_len, num_layers=num_layers,\n hidden_size=hidden_size,\n cond_embd_size=cond_embd_size)\n\n self.fc21 = nn.Linear(hidden_size, latent_size)\n self.fc22 = nn.Linear(hidden_size, latent_size)\n\n self.decoder = DecoderRNN(hidden_size=hidden_size,\n num_layers=num_layers,\n latent_size=latent_size,\n output_size=voc_len,\n cond_embd_size=cond_embd_size,\n dropout=dropout)\n\n self.latent_size = latent_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps * std\n\n def compute_KLD(self, mu, logvar):\n KLD = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())\n return KLD\n\n def test_Gaussian(self, MAX_LENGTH=MAX_LENGTH):\n cond = torch.arange(4, device=device)\n Batch_size = cond.shape[0]\n\n # z:(MAX_LENGTH, BS, latent_size)\n z = torch.randn((MAX_LENGTH, 1, self.latent_size), device=device)\n z = z.repeat(1, Batch_size, 1)\n cembd = self.cond_embd1(cond)\n hidden = self.decoder.initHidden(z, cembd)\n decoder_input = torch.tensor([[SOS_token]*Batch_size], device=device)\n\n outputs = list()\n\n for di in range(MAX_LENGTH):\n # print('decoder_input',decoder_input.shape)\n output, hidden = self.decoder(decoder_input, hidden)\n topv, topi = output.topk(1)\n outputs.append(topi)\n\n decoder_input = topi.squeeze().view(1, -1).detach()\n return torch.cat(outputs).squeeze()\n\n def forward(self, x, cond1, cond2, criterion,\n teacher_forcing_ratio=0, is_train=True):\n target = x.detach()\n batch_size = cond1.shape[0]\n # cond: (BS)-> cembd: (BS,8)\n # x: (MAX_length, BS)\n # cond1: for Rnn1, cond2: for Rnn2\n cembd1 = self.cond_embd1(cond1) # (BS, cond_embd_size=8)\n\n # cembd = cembd.view(1, cembd.shape[0], cembd.shape[1])\n hid = self.enoder.initHidden(cond1.shape[0], cembd1)\n\n # en_output:(MAX_length, BS, hidden_size)\n en_output, hidden = self.enoder(x, hid)\n mu = self.fc21(en_output)\n logvar = self.fc22(en_output)\n z = self.reparameterize(mu, logvar) # z:(MAX_length, BS, latent_size)\n loss_KLD = self.compute_KLD(mu, logvar)\n loss_CE = 0\n\n cembd2 = self.cond_embd1(cond2) # (BS, cond_embd_size=8)\n\n target_length = x.shape[0]\n target = target.unsqueeze(0)\n hidden = self.decoder.initHidden(z, cembd2)\n decoder_input = torch.tensor([[SOS_token]*batch_size], device=device)\n # print(decoder_input.shape)\n outputs = list()\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n if is_train:\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for di in range(target_length):\n # print('force:',decoder_input.shape)\n output, hidden = self.decoder(decoder_input, hidden)\n # print(output.shape, x[di].shape)\n loss_CE += criterion(output.squeeze(), x[di])\n # output = output[0]\n # loss_CE += criterion(output, x[di])\n topv, topi = output.topk(1)\n outputs.append(topi)\n if di != target_length-1:\n decoder_input = target[:, di+1] # Teacher forcing\n else:\n # Without teacher forcing: use its own predictions as the next input\n for di in range(target_length):\n # print('no force:', decoder_input.shape)\n output, hidden = 
self.decoder(decoder_input, hidden)\n # print(output.shape , x[di].shape)\n # print(di, output.shape, x[di].shape)\n # output = output[0]\n loss_CE += criterion(output.squeeze(), x[di])\n # loss_CE += criterion(output, x[di])\n topv, topi = output.topk(1)\n outputs.append(topi)\n decoder_input = topi.squeeze().view(1, -1).detach() # detach from history as input\n else: # testing\n for di in range(target_length):\n output, hidden = self.decoder(decoder_input, hidden)\n topv, topi = output.topk(1)\n outputs.append(topi)\n decoder_input = topi.squeeze().view(1, -1).detach() # detach from history as input\n if topi == EOS_token:\n break\n\n return torch.cat(outputs).squeeze(), loss_KLD, loss_CE\n\n\n# Encoder\nclass EncoderRNN(nn.Module):\n def __init__(self, voc_len, num_layers=2,\n hidden_size=256, cond_embd_size=8):\n super(EncoderRNN, self).__init__()\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.voc_embd = nn.Embedding(voc_len, hidden_size)\n self.Rnn1 = nn.LSTM(input_size=hidden_size,\n hidden_size=hidden_size,\n num_layers=self.num_layers)\n self.cond_embd_size = cond_embd_size\n\n def forward(self, input, hidden):\n # x: (MAX_length, BS)\n # voc_embd: (MAX_length, BS, hidden_size)\n voc_embd = self.voc_embd(input)\n # output:(MAX_length, BS, hidden_size)\n output, hid = self.Rnn1(voc_embd, hidden)\n return output, hid\n\n def initHidden(self, Batch_size, cembd):\n # cembd:(BS, cond_embd_size=8)\n cond_embd = cembd.unsqueeze(0) # (1,BS, cond_embd_size=8)\n cond_embd = cond_embd.repeat(self.num_layers, 1, 1)\n hid = torch.zeros(self.num_layers, Batch_size,\n self.hidden_size-self.cond_embd_size,\n device=device)\n hidden = torch.cat((hid, cond_embd), dim=-1)\n return (hidden, hidden)\n\n\n# Decoder\nclass DecoderRNN(nn.Module):\n def __init__(self, hidden_size=256, num_layers=2,\n latent_size=29, output_size=29,\n cond_embd_size=8, MAX_LENGTH=MAX_LENGTH, dropout=0):\n super(DecoderRNN, self).__init__()\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.embedding_LatToHid = nn.Embedding(latent_size, hidden_size)\n\n self.Rnn2 = nn.LSTM(input_size=hidden_size,\n hidden_size=hidden_size,\n num_layers=num_layers, dropout=0.5)\n\n self.fc = nn.Linear(hidden_size, output_size)\n\n self.latent_to_hidden = nn.Linear(latent_size * MAX_LENGTH + cond_embd_size,\n hidden_size)\n\n def forward(self, input, hidden):\n # input:(BS, latent_size)\n # cond_embd (BS, cond_embd_size=8)\n # cat cond_embd with voc_embd-> (BS, latent+cond_embd_size)\n output = self.embedding_LatToHid(input)\n output, hid = self.Rnn2(output, hidden)\n output = self.fc(output)\n return output, hid\n\n def initHidden(self, zi, cond_embd):\n # cembd:(BS, cond_embd_size=8)\n # zi:(MAX_length, BS, latent_size)\n batch_size = zi.shape[1]\n cond_embd = cond_embd.unsqueeze(0) # (1,BS, cond_embd_size=8)\n\n cond_embd = cond_embd.repeat(self.num_layers, 1, 1)\n\n zi = zi.transpose(0, 1)\n zi = zi.reshape(1, batch_size, -1) # (1, BS, laten*MAX)\n zi = zi.repeat(self.num_layers, 1, 1)\n output = torch.cat([zi, cond_embd], dim=-1)\n hid = self.latent_to_hidden(output)\n hid = hid.cuda(device)\n\n # print('hid',hid.shape)\n return (hid, hid)\n\n\ndef train(model, dataloader, criterion,\n optimizer, epoch,\n teacher_forcing_ratio, KL_weight):\n\n model.train()\n acc = 0\n total_num_words = 0\n avg_KLD_loss = 0\n avg_CE_loss = 0\n for batch_ndx, (letter, cond) in enumerate(dataloader):\n\n cur_num_words = cond.shape[0]\n total_num_words += cur_num_words\n letter = letter.cuda(device)\n cond = 
cond.cuda(device)\n\n output, KLD_loss, CE_loss = model(letter, cond, cond,\n criterion,\n teacher_forcing_ratio)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss = CE_loss + KLD_loss * KL_weight\n avg_CE_loss += CE_loss\n avg_KLD_loss += KLD_loss\n\n loss.backward()\n optimizer.step()\n\n output = output.transpose(1, 0)\n letter = letter.transpose(1, 0)\n for pred, gt in zip(output,letter):\n pred_str = IndexToletter(pred)\n gt_str = IndexToletter(gt)\n acc += compute_bleu(pred_str, gt_str)\n # if batch_ndx == epoch or epoch == 9:\n # print(gt_str,\"\\t\",pred_str)\n\n # if batch_ndx % 500 == 0:\n # print(f\"Epoch:{epoch:} {batch_ndx:} BLEU4:{acc/total_num_words:.2f} \"\n # f\"loss:{loss:.4f} KLD_loss:{KLD_loss:.4f} CE_loss:{CE_loss:.4f}\")\n avg_KLD_loss = avg_KLD_loss/batch_ndx\n avg_CE_loss = avg_CE_loss/batch_ndx\n avg_bleu4 = acc/total_num_words\n\n print(f\"Epoch:{epoch:} Training BLEU4:{avg_bleu4:2.2f} KLD_loss:{avg_KLD_loss:2.2f} CE_loss:{avg_CE_loss:2.2f}\")\n return avg_bleu4, avg_CE_loss, avg_KLD_loss\n\n\ndef test_tense_conversion(model, test_dataset, criterion):\n acc = 0\n total_num_words = 0\n\n model.eval()\n\n for idx in range(test_dataset.__len__()):\n voc1, voc2, cond1, cond2 = test_dataset.__getitem__(idx)\n # print(voc1.shape)\n # print(voc2.shape)\n total_num_words += 1\n\n voc1 = voc1.cuda(device)\n cond1 = cond1.cuda(device)\n cond2 = cond2.cuda(device)\n\n output, KLD_loss, CE_loss = model(voc1, cond1, cond2,\n criterion, 0,\n is_train=False)\n voc1 = voc1.squeeze()\n voc2 = voc2.squeeze()\n pred_str = IndexToletter(output)\n gt_str = IndexToletter(voc2)\n in_str = IndexToletter(voc1)\n acc += compute_bleu(pred_str, gt_str)\n # print(f\"\\ninput: {in_str:}\")\n # print(f\"gt_str: {gt_str:}\")\n # print(f\"pred_str: {pred_str:}\")\n # print(f\"BLEU4: {acc/total_num_words:.2f}\")\n\n acc = acc/total_num_words\n print(f\"Testing Bleu4 acc: {acc:.2f}\")\n return acc\n\n\ndef test_Gaussian(model, num_test):\n\n model.eval()\n result_list = list()\n for idx in range(num_test):\n output = model.test_Gaussian(MAX_LENGTH)\n output = output.transpose(1, 0)\n one_time_list = list()\n for i in (output):\n pred_str = IndexToletter(i)\n one_time_list.append(pred_str)\n result_list.append(one_time_list)\n # print(f\"pred_str: {one_time_list:}\")\n acc = Gaussian_score(result_list)\n # print(result_list)\n\n print(f\"Testing Gaussian acc: {acc:.2f}\")\n return acc\n\n\ndef main():\n args = argparser()\n # print(args)\n args = vars(args)\n # print('seed=',args['seed'])\n torch.manual_seed(args['seed'])\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n lr = args['lr']\n batch_size = args['batch_size']\n\n Data = TrainDataset()\n train_data_loader = data.DataLoader(Data, batch_size=batch_size,\n collate_fn=collate_wrapper,\n pin_memory=True,\n num_workers=args['workers'],\n shuffle=True)\n test_data = TestDataset()\n\n model = Seq2Seq_VAE(dropout=args['dropout'])\n model.cuda(device)\n\n criterion = nn.CrossEntropyLoss()\n criterion.cuda(device)\n\n optimizer = torch.optim.SGD(model.parameters(),\n lr,\n momentum=args['momentum'],\n weight_decay=1e-4)\n\n best_bleu4_acc = 0\n best_cond_acc = 0\n CE_list = list()\n KLD_list = list()\n trian_bleu4_list = list()\n test_bleu4_list = list()\n test_cond_list = list()\n model_para = 'dropout' + str(args['dropout']) + '_lr' + str(lr) \\\n + \"_bs\" + str(batch_size) + \"_klannel_\" \\\n + str(args['KL_annealing'])\n\n if args['resume']:\n if os.path.isfile(args['resume']):\n 
print(f\"resume ckpt from: {args['resume']:}\")\n checkpoint = torch.load(args['resume'],\n map_location=device)\n best_bleu4_acc = checkpoint['best_bleu4_acc']\n best_cond_acc = checkpoint['best_cond_acc']\n model.load_state_dict(checkpoint['model_state_dict'])\n\n # for plot\n CE_list = checkpoint['CE_list']\n KLD_list = checkpoint['KLD_list']\n trian_bleu4_list = checkpoint['trian_bleu4_list']\n test_bleu4_list = checkpoint['test_bleu4_list']\n test_cond_list = checkpoint['test_cond_list']\n\n # print(f\"Resume ckpt (1)Best bleu4:{best_bleu4_acc:2.2f}\"\n # f\" (2)Best cond:{best_cond_acc:2.2f}\")\n else:\n print(\"=> no checkpoint found at '{}'\".format(args['resume']))\n\n if args['evaluate'] is True:\n model.eval()\n # print(args['seed'])\n test_bleu4 = test_tense_conversion(model, test_data, criterion)\n test_cond = test_Gaussian(model, 100)\n return 0\n best_bleu4_acc = 0\n best_cond_acc = 0\n for epoch in range(args['epochs']):\n # KL cost annealing\n if args['epochs'] == 'mono':\n KL_weight = epoch * 0.2 # Monotonic\n if KL_weight >= 1:\n KL_weight = 1\n else: # KL cycle cost annealing\n if (epoch % 10) <= 5:\n KL_weight = epoch * 0.2\n else:\n KL_weight = 1\n\n if (epoch % 10) <= 5:\n teacher_forcing_ratio = 1 - epoch * 0.2\n else:\n teacher_forcing_ratio = 0\n\n if epoch < 10:\n teacher_forcing_ratio = 0.8\n\n trian_bleu4, CE_loss, KLD_loss = train(model, train_data_loader,\n criterion, optimizer,\n epoch, teacher_forcing_ratio,\n KL_weight)\n test_bleu4 = test_tense_conversion(model, test_data, criterion)\n test_cond = test_Gaussian(model, 100)\n\n CE_list.append(CE_loss)\n KLD_list.append(KLD_loss)\n trian_bleu4_list.append(trian_bleu4)\n test_bleu4_list.append(test_bleu4)\n test_cond_list.append(test_cond)\n\n if test_cond > best_cond_acc and test_bleu4 > best_bleu4_acc:\n torch.save({'epoch': epoch+1,\n 'model_state_dict': model.state_dict(),\n 'encoder_optimizer': optimizer.state_dict(),\n 'lr': lr,\n 'best_bleu4_acc': best_bleu4_acc,\n 'best_cond_acc': best_cond_acc,\n 'CE_list': CE_list,\n 'KLD_list': KLD_list,\n 'trian_bleu4_list': trian_bleu4_list,\n 'test_bleu4_list': test_bleu4_list,\n 'test_cond_list': test_cond_list\n }, model_para+\"_best_bleu4_and_gaussian_model.pth\")\n print(f\"Save best Gaussian model: {best_bleu4_acc:.2f} {best_cond_acc:.2f}\")\n\n if test_bleu4 > best_bleu4_acc:\n best_bleu4_acc = test_bleu4\n torch.save({'epoch': epoch+1,\n 'model_state_dict': model.state_dict(),\n 'encoder_optimizer': optimizer.state_dict(),\n 'lr': lr,\n 'best_bleu4_acc': best_bleu4_acc,\n 'best_cond_acc': test_cond,\n 'CE_list': CE_list,\n 'KLD_list': KLD_list,\n 'trian_bleu4_list': trian_bleu4_list,\n 'test_bleu4_list': test_bleu4_list,\n 'test_cond_list': test_cond_list\n }, model_para+\"_best_bleu4_model.pth\")\n print(f\"Save best BLEU4 model: {best_bleu4_acc:.2f} {best_cond_acc:.2f}\")\n\n if test_cond > best_cond_acc:\n best_cond_acc = test_cond\n torch.save({'epoch': epoch+1,\n 'model_state_dict': model.state_dict(),\n 'encoder_optimizer': optimizer.state_dict(),\n 'lr': lr,\n 'best_bleu4_acc': test_bleu4,\n 'best_cond_acc': best_cond_acc,\n 'CE_list': CE_list,\n 'KLD_list': KLD_list,\n 'trian_bleu4_list': trian_bleu4_list,\n 'test_bleu4_list': test_bleu4_list,\n 'test_cond_list': test_cond_list\n }, model_para+\"_best_gaussian_model.pth\")\n print(f\"Save best Gaussian model: {best_bleu4_acc:.2f} {best_cond_acc:.2f}\")\n print(f\"Final Result (1)Best bleu4:{best_bleu4_acc:2.2f} (2)Best cond:{best_cond_acc:2.2f}\")\n\n\nif __name__ == '__main__':\n main()\n", 
"sub_path": "lab4_Seq2Seq_CVAE/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 18392, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.exp", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 50, "usage_type": "call"}, {"api_name": "dataloader.MAX_LENGTH", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 58, "usage_type": "call"}, {"api_name": "dataloader.MAX_LENGTH", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 62, "usage_type": "call"}, {"api_name": "dataloader.MAX_LENGTH", "line_number": 66, "usage_type": "argument"}, {"api_name": "torch.cat", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 100, "usage_type": "call"}, {"api_name": "random.random", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 176, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 176, "usage_type": "name"}, {"api_name": "dataloader.MAX_LENGTH", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 183, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 191, "usage_type": "name"}, {"api_name": "dataloader.MAX_LENGTH", "line_number": 191, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 214, "usage_type": "call"}, {"api_name": "dataloader.IndexToletter", "line_number": 
254, "usage_type": "call"}, {"api_name": "dataloader.IndexToletter", "line_number": 255, "usage_type": "call"}, {"api_name": "test_criterion.compute_bleu", "line_number": 256, "usage_type": "call"}, {"api_name": "dataloader.IndexToletter", "line_number": 292, "usage_type": "call"}, {"api_name": "dataloader.IndexToletter", "line_number": 293, "usage_type": "call"}, {"api_name": "dataloader.IndexToletter", "line_number": 294, "usage_type": "call"}, {"api_name": "test_criterion.compute_bleu", "line_number": 295, "usage_type": "call"}, {"api_name": "dataloader.MAX_LENGTH", "line_number": 311, "usage_type": "argument"}, {"api_name": "dataloader.IndexToletter", "line_number": 315, "usage_type": "call"}, {"api_name": "test_criterion.Gaussian_score", "line_number": 319, "usage_type": "call"}, {"api_name": "parser.argparser", "line_number": 327, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 331, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 332, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 333, "usage_type": "attribute"}, {"api_name": "dataloader.TrainDataset", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 339, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 339, "usage_type": "name"}, {"api_name": "dataloader.collate_wrapper", "line_number": 340, "usage_type": "name"}, {"api_name": "dataloader.TestDataset", "line_number": 344, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 349, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 349, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 352, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 369, "usage_type": "call"}, {"api_name": "os.path", "line_number": 369, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 371, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 431, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 447, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 463, "usage_type": "call"}]} +{"seq_id": "192272711", "text": "\"\"\"Find working proxies and use them concurrently.\n\nNote: Pay attention to Broker.serve(), instead of the code listed below.\n Perhaps it will be much useful and friendlier.\n\"\"\"\n\nimport asyncio\nimport logging\nfrom functools import partial\nfrom urllib.parse import urlparse\n\nimport aiohttp\nimport requests\n\nfrom proxybroker import Broker, ProxyPool\nfrom proxybroker.errors import NoProxyError\n\ndef main():\n loop = asyncio.get_event_loop()\n\n proxies = asyncio.Queue(loop=loop)\n\n judges = ['http://httpbin.org/get?show_env',\n 'https://httpbin.org/get?show_env']\n providers = ['http://www.proxylists.net/', 'https://free-proxy-list.net/']\n\n broker = Broker(\n proxies, timeout=8, max_conn=200, max_tries=3, verify_ssl=False,\n judges=judges, providers=providers, loop=loop)\n\n types = [('HTTP', ('Anonymous', 'High')), 'HTTPS']\n countries = ['US', 'DE', 'FR']\n\n urls = ['http://httpbin.org/get', 'https://httpbin.org/get',\n 'http://httpbin.org/redirect/1', 'http://httpbin.org/status/404']\n\n proxy_pool = ProxyPool(proxies)\n\n tasks = asyncio.gather(\n broker.find(types=types, countries=countries, post=False,\n strict=True, limit=100000))\n loop.run_until_complete(tasks)\n\n broker.show_stats(verbose=True)\n\n\nif __name__ == 
'__main__':\n logging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='[%H:%M:%S]', level=logging.INFO)\n logger = logging.getLogger('Parser')\n\n main()\n", "sub_path": "find_and_use.py", "file_name": "find_and_use.py", "file_ext": "py", "file_size_in_byte": 1529, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "asyncio.get_event_loop", "line_number": 19, "usage_type": "call"}, {"api_name": "asyncio.Queue", "line_number": 21, "usage_type": "call"}, {"api_name": "proxybroker.Broker", "line_number": 27, "usage_type": "call"}, {"api_name": "proxybroker.ProxyPool", "line_number": 37, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 50, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "32712336", "text": "from __future__ import print_function\nfrom PIL import Image\nfrom PIL import ImageTk\nimport tkinter as tki\nimport threading\nimport cv2\nimport time\nimport Employee\n\n\nclass MarkingAttendance:\n def func(self):\n self.thread = threading.Thread(target=self.MA_videoLoop, args=())\n self.thread.start()\n\n def MA_videoLoop(self):\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read('trainer/trainer.yml')\n cascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + cascadePath)\n font = cv2.FONT_HERSHEY_SIMPLEX\n id = 0\n minW = 0.1 * self.vs.get(3)\n minH = 0.1 * self.vs.get(4)\n confidence = 0\n\n while True:\n\n ret, self.frame = self.vs.read()\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.2,\n minNeighbors=5,\n minSize=(int(minW), int(minH)),\n )\n print(faces)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(self.frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n id, confidence = recognizer.predict(gray[y:y + h, x:x + w])\n\n # If confidence is less them 100 ==> \"0\" : perfect match\n if (confidence < 100):\n confidence_percent = \" {0}%\".format(round(100 - confidence))\n confidence = round(100 - confidence)\n print(confidence)\n print(id)\n else:\n id = \"unknown\"\n confidence_percent = \" {0}%\".format(round(100 - confidence))\n confidence = round(100 - confidence)\n\n cv2.putText(\n self.frame,\n str(id),\n (x + 5, y - 5),\n font,\n 1,\n (255, 255, 255),\n 2\n )\n\n if confidence > 65:\n # print(\"Match Found, Marking Attendance!\")\n result = Employee.Employee(id).addAttendance(method=\"Face\")\n name = Employee.Employee(id).fullName()\n print(result)\n self.label1.config(text=result+' '+name, bg=\"lightgreen\")\n break\n\n if self.panel is None:\n self.panel = tki.Label(image=image)\n self.panel.image = image\n self.panel.pack(side=\"left\", padx=10, pady=10)\n\n\n else:\n self.panel.configure(image=image)\n self.panel.image = image\n cv2.waitKey(1)\n\n def onClose(self):\n print(\"[INFO] closing...\")\n self.stopEvent.set()\n self.root.quit()\n self.vs.release()\n\n def __init__(self, vs):\n self.vs = vs\n self.frame = None\n self.thread = None\n self.stopEvent = None\n\n self.root = tki.Tk()\n self.panel = None\n\n self.vs.set(3, 480)\n self.vs.set(4, 480)\n 
self.stopEvent = threading.Event()\n\n self.label1 = tki.Label(self.root)\n self.label1.pack(side=\"bottom\", fill=\"both\", expand=\"yes\", padx=10,\n pady=10)\n btn1 = tki.Button(self.root, text=\"Train\")\n btn1.pack(side=\"bottom\", fill=\"both\", expand=\"yes\", padx=10,\n pady=10)\n btn2 = tki.Button(self.root, text=\"Mark Attendance\", command=self.func)\n btn2.pack(side=\"bottom\", fill=\"both\", expand=\"yes\", padx=10,\n pady=10)\n btn3 = tki.Button(self.root, text=\"Display\")\n btn3.pack(side=\"bottom\", fill=\"both\", expand=\"yes\", padx=10,\n pady=10)\n\n self.root.wm_title(\"PyImageSearch PhotoBooth\")\n self.root.wm_protocol(\"WM_DELETE_WINDOW\", self.onClose)\n\n\nprint(\"[INFO] warming up camera...\")\nvs = cv2.VideoCapture(0)\ntime.sleep(2.0)\n\npba = MarkingAttendance(vs)\npba.root.mainloop()\n", "sub_path": "Graphical_Interface.py", "file_name": "Graphical_Interface.py", "file_ext": "py", "file_size_in_byte": 4195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "threading.Thread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.face.LBPHFaceRecognizer_create", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.face", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.data", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 32, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 33, "usage_type": "name"}, {"api_name": "cv2.rectangle", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 57, "usage_type": "call"}, {"api_name": "Employee.Employee", "line_number": 69, "usage_type": "call"}, {"api_name": "Employee.Employee", "line_number": 70, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 84, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 98, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 103, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 105, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 108, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 111, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 123, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "295486725", "text": "import datetime\nimport logging\n\nfrom django.conf import settings\n\nfrom celeryutils import task\nfrom tower import ugettext as _\n\nimport amo\nfrom amo.utils import send_mail_jinja\nimport mkt.constants.reviewers as rvw\n\n\nlog = logging.getLogger('z.task')\n\n\n@task\ndef send_mail(cleaned_data, theme_lock):\n \"\"\"\n Send emails out for 
respective review actions taken on themes.\n \"\"\"\n theme = cleaned_data['theme']\n action = cleaned_data['action']\n reject_reason = cleaned_data['reject_reason']\n reason = None\n if reject_reason:\n reason = rvw.THEME_REJECT_REASONS[reject_reason]\n elif action == rvw.ACTION_DUPLICATE:\n reason = _('Duplicate Submission')\n comment = cleaned_data['comment']\n\n emails = set(theme.addon.authors.values_list('email', flat=True))\n cc = settings.THEMES_EMAIL\n context = {\n 'theme': theme,\n 'base_url': settings.SITE_URL,\n 'reason': reason,\n 'comment': comment\n }\n\n subject = None\n if action == rvw.ACTION_APPROVE:\n subject = _('Thanks for submitting your Theme')\n template = 'reviewers/themes/emails/approve.html'\n theme.addon.update(status=amo.STATUS_PUBLIC)\n\n elif action == rvw.ACTION_REJECT:\n subject = _('A problem with your Theme submission')\n template = 'reviewers/themes/emails/reject.html'\n theme.addon.update(status=amo.STATUS_REJECTED)\n\n elif action == rvw.ACTION_DUPLICATE:\n subject = _('A problem with your Theme submission')\n template = 'reviewers/themes/emails/reject.html'\n theme.addon.update(status=amo.STATUS_REJECTED)\n\n elif action == rvw.ACTION_FLAG:\n subject = _('Theme submission flagged for review')\n template = 'reviewers/themes/emails/flag_reviewer.html'\n theme.addon.update(status=amo.STATUS_REVIEW_PENDING)\n\n # Send the flagged email to themes email.\n emails = [settings.THEMES_EMAIL]\n cc = None\n\n elif action == rvw.ACTION_MOREINFO:\n subject = _('A question about your Theme submission')\n template = 'reviewers/themes/emails/moreinfo.html'\n context['reviewer_email'] = theme_lock.reviewer.email\n theme.addon.update(status=amo.STATUS_REVIEW_PENDING)\n\n amo.log(amo.LOG.THEME_REVIEW, theme.addon, details={\n 'action': action,\n 'reject_reason': reject_reason,\n 'comment': comment}, user=theme_lock.reviewer)\n log.info('Theme %s (%s) - %s' % (theme.addon.name, theme.id, action))\n\n theme.approve = datetime.datetime.now()\n theme.save()\n\n send_mail_jinja(subject, template, context,\n recipient_list=emails, cc=cc,\n from_email=settings.ADDONS_EMAIL,\n headers={'Reply-To': settings.THEMES_EMAIL})\n", "sub_path": "mkt/reviewers/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 2772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "mkt.constants.reviewers.THEME_REJECT_REASONS", "line_number": 27, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers", "line_number": 27, "usage_type": "name"}, {"api_name": "mkt.constants.reviewers.ACTION_DUPLICATE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers", "line_number": 28, "usage_type": "name"}, {"api_name": "tower.ugettext", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.settings.THEMES_EMAIL", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_URL", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "mkt.constants.reviewers.ACTION_APPROVE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers", "line_number": 42, "usage_type": "name"}, {"api_name": "tower.ugettext", "line_number": 43, "usage_type": "call"}, {"api_name": 
"amo.STATUS_PUBLIC", "line_number": 45, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers.ACTION_REJECT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers", "line_number": 47, "usage_type": "name"}, {"api_name": "tower.ugettext", "line_number": 48, "usage_type": "call"}, {"api_name": "amo.STATUS_REJECTED", "line_number": 50, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers.ACTION_DUPLICATE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers", "line_number": 52, "usage_type": "name"}, {"api_name": "tower.ugettext", "line_number": 53, "usage_type": "call"}, {"api_name": "amo.STATUS_REJECTED", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers.ACTION_FLAG", "line_number": 57, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers", "line_number": 57, "usage_type": "name"}, {"api_name": "tower.ugettext", "line_number": 58, "usage_type": "call"}, {"api_name": "amo.STATUS_REVIEW_PENDING", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.conf.settings.THEMES_EMAIL", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 63, "usage_type": "name"}, {"api_name": "mkt.constants.reviewers.ACTION_MOREINFO", "line_number": 66, "usage_type": "attribute"}, {"api_name": "mkt.constants.reviewers", "line_number": 66, "usage_type": "name"}, {"api_name": "tower.ugettext", "line_number": 67, "usage_type": "call"}, {"api_name": "amo.STATUS_REVIEW_PENDING", "line_number": 70, "usage_type": "attribute"}, {"api_name": "amo.log", "line_number": 72, "usage_type": "call"}, {"api_name": "amo.LOG", "line_number": 72, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "attribute"}, {"api_name": "amo.utils.send_mail_jinja", "line_number": 81, "usage_type": "call"}, {"api_name": "django.conf.settings.ADDONS_EMAIL", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 83, "usage_type": "name"}, {"api_name": "django.conf.settings.THEMES_EMAIL", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 84, "usage_type": "name"}, {"api_name": "celeryutils.task", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "185160333", "text": "import cv2\nimport numpy as np\nfrom moviepy.editor import *\n\nEDGE_COLOR = [0, 255, 0]\nBLUR = 501\nEDIT = True\nENHANCE = False\nwhite = None\n\n\ndef edit_frame(img):\n edges = cv2.Canny(img.copy(), 100, 200)\n\n fin = cv2.GaussianBlur(img.copy(), (BLUR, BLUR), 0)\n\n fin = cv2.addWeighted(fin, 0.4, white, 0.1, 0)\n\n fin[np.where(edges == 255)] = EDGE_COLOR\n\n return fin\n\n\nif __name__ == '__main__':\n in_vid = cv2.VideoCapture(sys.argv[1])\n fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n fps = in_vid.get(cv2.CAP_PROP_FPS)\n width, height, frames = int(in_vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(in_vid.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(in_vid.get(cv2.CAP_PROP_FRAME_COUNT))\n\n out_vid_edges = cv2.VideoWriter(\"out_edges_\" + sys.argv[1], fourcc, fps, (width, height), 1)\n\n white = np.zeros((height, width, 3), np.uint8)\n white[:, :, :] = [220, 220, 220]\n\n i = 0\n\n while in_vid.isOpened():\n ret, frame = in_vid.read()\n if not ret:\n break\n\n edges = edit_frame(frame.copy())\n edges = cv2.cvtColor(edges.copy(), cv2.COLOR_RGB2BGR)\n\n 
out_vid_edges.write(edges)\n\n i += 1\n print(str(round((i / frames) * 100, 2)) + \"%\", end=\"\\r\")\n\n in_vid.release()\n out_vid_edges.release()\n cv2.destroyAllWindows()\n print(\"Done! \")\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "cv2.Canny", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.destroyAllWindows", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "383362735", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# from .miniViT import mViT\nfrom models import pose_higher_hrnet\nfrom models.depth_header import DepthEstimation\nfrom models.PM_header import PoseMaskEstimation\nfrom models.voteposenet import VotePoseNet\n\n\nclass HrnetAdaptiveBins(nn.Module):\n def __init__(self, cfg, backend, is_train = True):\n super(HrnetAdaptiveBins, self).__init__()\n # self.encoder = Encoder(backend)\n self.backbone = backend # extracting features \n # depth header\n self.depth_header = eval('DepthEstimation.build')( # hrnet_adabins.build\n cfg, cfg.BINS)\n self.PM_header = PoseMaskEstimation(cfg, basicM_num=1)\n\n def forward(self, x, **kwargs):\n # unet_out = self.decoder(self.encoder(x), **kwargs) # 128-channel feature output, batch 128 h/2 w/2\n # import pdb; pdb.set_trace()\n hrnet_out = self.backbone(x) # the output dimensions are problematic\n # FEATURE output channel fixed\n bin_edges, pred = self.depth_header(hrnet_out)\n # mask and poseH output\n heatmap, mask_prob = self.PM_header(hrnet_out)\n\n # calculate the loss and get into the votenet \n\n return bin_edges, pred, heatmap, mask_prob, hrnet_out\n\n @classmethod\n def build(cls, cfg, is_train, **kwargs):\n # Building Encoder-Decoder model\n print('Building Hrnet_adabin model..', end='')\n backbone = eval(cfg.BACKBONE_MODEL + '.get_pose_net')(cfg, is_train=is_train)\n m = cls(cfg, backbone, is_train=is_train)\n print('Done.')\n return m\n\n", "sub_path": "lib/models/hrnet_adabins.py", "file_name": "hrnet_adabins.py", "file_ext": "py", "file_size_in_byte": 1602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "models.PM_header.PoseMaskEstimation", "line_number": 20,
"usage_type": "call"}]} +{"seq_id": "485620148", "text": "from collections import OrderedDict\n\nfrom inputstream import InputStream\n\n# from collections import defaultdict\n# from collections import namedtuple\n\n\nPRINT_JUST_VALUE = True\n\n# ---- Token types ----\n\n# Data types\nDATA_TYPE = 'DATA_TYPE'\nINTEGER, REAL, STRING, FUNCTION = 'INTEGER', 'REAL', 'STRING', 'FUNCTION'\n\n# Constants\nCONSTANT = 'CONSTANT'\nINTEGER_CONST, REAL_CONST, STRING_CONST = 'INTEGER_CONST', 'REAL_CONST', 'STRING_CONST'\n\n# Binary operations\nBINARY_OPERATION = 'BINARY_OPERATION'\nADD, SUB, MUL, INT_DIV, REAL_DIV, POW = 'ADD', 'SUB', 'MUL', 'INT_DIV', 'REAL_DIV', 'POW'\n\n# Unary operations\nUNARY_OPERATION = 'UNARY_OPERATION'\nNEGATE, SQRT, INCREMENT, DECREMENT = 'SUB', 'SQRT', 'INC', 'DEC'\n\n# Built-in functions\nPRINT, SUM = 'PRINT', 'SUM'\n\n# Parenthesis\nPARENTHESIS = 'PARENTHESIS'\nLPARENS, RPARENS, LBRACKET, RBRACKET, LSQUARE, RSQUARE = 'LPARENS', 'RPARENS', 'LBRACKET', 'RBRACKET', 'LSQUARE', 'RSQUARE'\n\n# Operators.\nOPERATORS = 'OPERATORS'\nGREATER_THAN, GREATER_EQUAL_THAN, EQUAL, LESS_EQUAL_THAN, LESS_THAN, NOT_EQUAL = (\n 'GREATER_THAN', 'GREATER_EQUAL_THAN', 'EQUAL', 'LESS_EQUAL_THAN', 'LESS_THAN', 'NOT_EQUAL'\n)\n# Branch.\nBRANCH = 'BRANCH'\nIF, THEN, ELSE, WHILE, OR, AND = 'IF', 'THEN', 'ELSE', 'WHILE', 'OR', 'AND'\n\n# Other\nCOMMA, END_STATEMENT, EOF, DECLARE_ASSIGN, ASSIGN, CALL, DECLARE, INLINE = (\n 'COMMA', 'END_STATEMENT', 'EOF', 'DECLARE_ASSIGN', 'ASSIGN', 'CALL', 'DECLARE', 'INLINE'\n)\n\nID = 'ID'\n\nIS, THAN, TO, NOT = 'IS', 'THAN', 'TO', 'NOT'\n\nUNKNOWN = 'UNKNOWN'\n\n\nclass Token:\n def __init__(self, type_, value):\n self.type = type_\n self.value = value\n\n def __repr__(self):\n return 'Token(type={}, value={})'.format(self.type, self.value)\n\n\nKEYWORDS = {\n 'int' : Token(INTEGER, INTEGER),\n 'real' : Token(REAL, REAL),\n 'string' : Token(STRING, STRING),\n 'inc' : Token(INCREMENT, INCREMENT),\n 'dec' : Token(DECREMENT, DECREMENT),\n 'sqrt' : Token(SQRT, SQRT),\n 'pow' : Token(POW, POW),\n 'print' : Token(FUNCTION, PRINT),\n 'sum' : Token(FUNCTION, SUM),\n 'call' : Token(CALL, CALL),\n 'if' : Token(IF, IF),\n 'then' : Token(THEN, THEN),\n 'else' : Token(ELSE, ELSE),\n 'while' : Token(WHILE, WHILE),\n ';' : Token(END_STATEMENT, END_STATEMENT),\n # '\\t' : Token(INLINE, INLINE),\n # ' ' : Token(INLINE, INLINE),\n\n '/' : Token(REAL_DIV, REAL_DIV),\n ',' : Token(COMMA, COMMA),\n '(' : Token(LPARENS, LPARENS),\n ')' : Token(RPARENS, RPARENS),\n '{' : Token(LBRACKET, LBRACKET),\n '}' : Token(RBRACKET, RBRACKET),\n\n # FIRST SECOND\n '*' : {'default': Token(MUL, MUL), '*': Token(POW, POW)},\n '+' : {'default': Token(ADD, ADD), '+': Token(INCREMENT, INCREMENT)},\n '-' : {'default': Token(SUB, SUB), '-': Token(DECREMENT, DECREMENT)},\n ':' : {'default': Token(DECLARE, DECLARE), '=': Token(DECLARE_ASSIGN, DECLARE_ASSIGN)},\n '=' : {'default': Token(ASSIGN, ASSIGN), '=': Token(EQUAL, EQUAL)},\n '>' : {'default': Token(GREATER_THAN, GREATER_THAN), '=': Token(GREATER_EQUAL_THAN, GREATER_EQUAL_THAN)},\n '<' : {'default': Token(LESS_THAN, LESS_THAN), '=': Token(LESS_EQUAL_THAN, LESS_EQUAL_THAN)},\n '!' 
: {'default': None, '=': Token(NOT_EQUAL, NOT_EQUAL)},\n\n '≥' : Token(GREATER_EQUAL_THAN, GREATER_EQUAL_THAN),\n '≤' : Token(LESS_EQUAL_THAN, LESS_EQUAL_THAN),\n '√' : Token(SQRT, SQRT),\n '^' : Token(POW, POW),\n '∑' : Token(FUNCTION, SUM),\n\n 'less' : Token(LESS_THAN, LESS_THAN),\n 'greater': Token(GREATER_THAN, GREATER_THAN),\n 'not' : Token(NOT, NOT),\n 'equal' : Token(EQUAL, EQUAL),\n\n 'is' : Token(IS, IS),\n 'than' : Token(THAN, THAN),\n 'or' : Token(OR, OR),\n 'and' : Token(AND, AND),\n 'to' : Token(TO, TO),\n\n}\n\noperator_logic = {\n (LESS_THAN, EQUAL) : LESS_EQUAL_THAN,\n (GREATER_THAN, EQUAL) : GREATER_EQUAL_THAN,\n (EQUAL, LESS_THAN) : LESS_EQUAL_THAN,\n (EQUAL, GREATER_THAN) : GREATER_EQUAL_THAN,\n (LESS_THAN, GREATER_THAN): NOT_EQUAL,\n (GREATER_THAN, LESS_THAN): NOT_EQUAL,\n\n (NOT, EQUAL) : NOT_EQUAL,\n (NOT, NOT_EQUAL) : EQUAL,\n (NOT, GREATER_THAN) : LESS_EQUAL_THAN,\n (NOT, LESS_THAN) : GREATER_EQUAL_THAN,\n (NOT, GREATER_EQUAL_THAN): LESS_THAN,\n (NOT, LESS_EQUAL_THAN) : GREATER_THAN,\n}\n\n\nclass Tokenizer:\n def __init__(self, stream):\n self.stream = stream\n self.current_character = self.stream.next()\n\n def advance(self, times=1):\n current_character = ''\n character = self.current_character\n for i in range(times):\n current_character = self.stream.next()\n self.current_character = current_character\n return character\n\n def skip_comment(self):\n while self.current_character != '\\n' and self.current_character != '':\n self.advance()\n self.advance()\n\n def skip_whitespace(self):\n while self.current_character.isspace():\n self.current_character = self.stream.next()\n\n def read_identifier(self):\n result = self.advance()\n while self.current_character.isalnum() or self.current_character == '_':\n result += self.advance()\n return KEYWORDS.get(result.lower(), Token(ID, result)) # Case-insensitive for keywords\n\n def read_number(self):\n result = self.advance()\n while self.current_character.isdigit():\n result += self.advance()\n if self.current_character == '.':\n result += self.advance()\n while self.current_character.isdigit():\n result += self.advance()\n return Token(REAL_CONST, result)\n else:\n return Token(INTEGER_CONST, result)\n\n def read_string(self):\n self.advance() # Advance pass the first quotation mark.\n result = ''\n while self.current_character != '\"':\n character = self.advance()\n if character == '\\\\':\n next_character = self.advance()\n if next_character == 'n':\n result += '\\n'\n continue\n result += character\n self.advance() # Advance pass the last quotation mark.\n return Token(STRING_CONST, result)\n\n def skip_multiple_end_statements(self):\n if self.current_character == '\\n':\n while self.stream.peek() == '\\n':\n self.advance()\n if self.current_character == '/' and self.stream.peek() == '/':\n self.skip_comment()\n self.skip_multiple_end_statements()\n\n def get_next_token(self):\n\n while self.current_character.isspace() or self.current_character == '/' and self.stream.peek() == '/':\n if self.current_character.isspace():\n self.skip_whitespace()\n if self.current_character == '/' and self.stream.peek() == '/':\n self.skip_comment()\n\n character = self.current_character\n\n if character.isalpha() or character == '_':\n return self.read_identifier()\n elif character.isdigit():\n return self.read_number()\n elif character == '\"':\n return self.read_string()\n\n token = KEYWORDS.get(character)\n if isinstance(token, Token):\n self.advance() # Skip current character.\n return token\n elif isinstance(token, dict):\n 
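 # two-character operators: the nested dict maps a possible second character to its token\n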
self.advance() # Skip current character.\n character = self.current_character\n if character in token:\n self.advance()\n token = token[character]\n else:\n token = token['default']\n return token\n elif character != '':\n return Token(UNKNOWN, character)\n else:\n return Token(EOF, '')\n\n\n# ------------------------------ NODES ------------------------------\n# AtomNode = namedtuple('AtomNode', 'name, type, str')\nclass Atom:\n \"\"\"\n Internal representation of an atom.\n \"\"\"\n TYPE = {INTEGER: int, REAL: float, STRING: str, FUNCTION: callable}\n\n def __init__(self, type_: (str, None), value: (str, callable)):\n assert type_ in Atom.TYPE or type_ is None, '{} not valid type'.format(type_)\n self.type = type_\n self.value = value\n\n def type_check(self, other):\n if self.type == other.type or other.type is None:\n return 1\n elif self.type == INTEGER and other.type == REAL:\n # Only occurs for expressions. Cannot change type of variable.\n self.type = REAL\n return 1\n elif self.type == REAL and other.type == INTEGER:\n other.type = REAL\n return 1\n raise TypeError('')\n\n def __add__(self, other):\n if self.type_check(other):\n return Atom(self.type, str(Atom.TYPE[self.type](self.value) + Atom.TYPE[self.type](other.value)))\n\n def __sub__(self, other):\n if self.type_check(other):\n return Atom(self.type, str(Atom.TYPE[self.type](self.value) - Atom.TYPE[self.type](other.value)))\n\n def __mul__(self, other):\n if self.type_check(other):\n return Atom(self.type, str(Atom.TYPE[self.type](self.value) * Atom.TYPE[self.type](other.value)))\n\n def __floordiv__(self, other):\n if self.type_check(other):\n return Atom(self.type, str(Atom.TYPE[self.type](self.value) // Atom.TYPE[self.type](other.value)))\n\n def __truediv__(self, other):\n if self.type_check(other):\n return Atom(self.type, str(Atom.TYPE[self.type](self.value) / Atom.TYPE[self.type](other.value)))\n\n def __pow__(self, other):\n if self.type_check(other):\n return Atom(self.type, str(Atom.TYPE[self.type](self.value) ** Atom.TYPE[self.type](other.value)))\n\n def __eq__(self, other):\n return Atom.TYPE[self.type](self.value) == Atom.TYPE[self.type](other.value)\n\n def __ne__(self, other):\n return Atom.TYPE[self.type](self.value) != Atom.TYPE[self.type](other.value)\n\n def __ge__(self, other):\n return Atom.TYPE[self.type](self.value) >= Atom.TYPE[self.type](other.value)\n\n def __le__(self, other):\n return Atom.TYPE[self.type](self.value) <= Atom.TYPE[self.type](other.value)\n\n def __gt__(self, other):\n return Atom.TYPE[self.type](self.value) > Atom.TYPE[self.type](other.value)\n\n def __lt__(self, other):\n return Atom.TYPE[self.type](self.value) < Atom.TYPE[self.type](other.value)\n\n def __repr__(self):\n if PRINT_JUST_VALUE:\n return str(self.value)\n return 'Atom({!r} {!r})'.format(self.type, self.value)\n\n\nclass ConstantNode:\n \"\"\"\n Container of a value.\n \"\"\"\n TYPES = INTEGER_CONST, REAL_CONST, STRING_CONST\n\n def __init__(self, inferred_type, value: str):\n assert inferred_type in ConstantNode.TYPES\n assert isinstance(value, str)\n self.inferred_type = inferred_type\n self.value = value\n\n\nclass NameNode:\n \"\"\"\n Container of a name.\n \"\"\"\n\n def __init__(self, name: str):\n assert isinstance(name, str)\n self.name = name\n\n\nclass ListNode:\n def __init__(self, size, data_type, *data):\n self.size = size\n self.data = data\n self.data_type = data_type\n\n\nclass AssignNode:\n \"\"\"\n\n \"\"\"\n TYPES = INTEGER, REAL, STRING, FUNCTION\n\n def __init__(self, name, type_, expression):\n 
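 # name: NameNode target; type_: one of AssignNode.TYPES or None (inferred later); expression: AST node for the assigned value\n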
assert isinstance(name, NameNode)\n assert type_ in AssignNode.TYPES or type_ is None, '{} is not a valid type!'.format(type_)\n self.name = name\n self.type = type_\n self.expression = expression # Needs to be evaluated\n\n\nclass ReAssign:\n def __init__(self, name, expression):\n assert isinstance(name, NameNode)\n self.name = name\n self.expression = expression\n\n\nclass BinaryOperationNode:\n OPERATIONS = ADD, SUB, MUL, INT_DIV, REAL_DIV, POW\n\n def __init__(self, left_expression, operation, right_expression):\n assert operation in BinaryOperationNode.OPERATIONS\n self.left_expression = left_expression\n self.operation = operation\n self.right_expression = right_expression\n\n\nclass UnaryOperationNode:\n OPERATIONS = NEGATE, SQRT, INCREMENT, DECREMENT\n\n def __init__(self, operation, expression):\n assert operation in UnaryOperationNode.OPERATIONS\n self.operation = operation\n self.expression = expression\n\n\nclass BlockNode:\n def __init__(self, statements):\n self.statements = statements\n self.namespace = OrderedDict()\n\n\nclass BranchNode:\n def __init__(self, left, condition, right=None):\n assert isinstance(condition, ConditionNode)\n self.left = left\n self.condition = condition\n self.right = right if right is not None else NoOperationNode()\n\n\nclass WhileLoopNode:\n def __init__(self, condition, block):\n assert isinstance(condition, ConditionNode)\n self.condition = condition\n self.block = block\n\n\nclass ConditionNode:\n OPERATIONS = GREATER_THAN, GREATER_EQUAL_THAN, EQUAL, LESS_EQUAL_THAN, LESS_THAN, NOT_EQUAL, AND, OR\n\n def __init__(self, left, operator, right):\n assert operator in ConditionNode.OPERATIONS, '{} not a valid operator!'.format(operator)\n self.left = left\n self.operator = operator\n self.right = right\n\n\nclass CallNode:\n def __init__(self, name, arguments=None):\n assert isinstance(name, str)\n self.name = name\n self.arguments = arguments\n\n\nclass BuiltInFunction:\n def __init__(self, name, arguments):\n assert isinstance(name, str)\n self.name = name\n self.arguments = list(arguments)\n\n\nclass NoOperationNode:\n pass\n\n\n# ------------------------------ PARSER ------------------------------\n\nclass ParserError(Exception):\n def __init__(self, parser, error_message=''):\n message = \"Error around row {}, column {}:\" \\\n \"\\n Illogical token sequence: {}, {}\".format(\n parser.token_stream.stream.row, parser.token_stream.stream.column,\n parser.previous_token, parser.current_token\n )\n super().__init__(error_message + '\\n' + message)\n\n\nclass Parser:\n \"\"\"\n ---- Grammar ----\n\n program : (statement)*\n block : LBRACKET (statement)* RBRACKET\n | statement statement\n statement : assignment END_STATEMENT\n assignment : variable DECLARE_ASSIGN expression\n expression : term ((ADD | SUB) term)*\n term : factor ((MUL | INT_DIV | REAL_DIV) factor)*\n factor : INTEGER_CONST\n | REAL_CONST\n | variable\n | NEG factor\n | LPAREN expr RPAREN\n variable : ID\n \"\"\"\n\n def __init__(self, token_stream):\n self.token_stream = token_stream\n self.previous_token = None\n self.current_token = self.token_stream.get_next_token()\n\n def parse(self):\n return self.program()\n\n def program(self):\n \"\"\"\n program : (statement)* EOF\n \"\"\"\n statements = []\n while self.current_token.type != EOF:\n statements.append(self.statement())\n return BlockNode(statements) # ProgramNode?\n\n def statement(self):\n \"\"\"\n statement : assignment END_STATEMENT Is also for declaration.\n | function END_STATEMENT\n | block Should a block 
require an end statement?\n | call END_STATEMENT\n | IF condition THEN statement (ELSE IF condition block)* Block or statement???\n | IF condition THEN statement (ELSE IF condition block)* ELSE block\n | WHILE condition THEN statement\n | END_STATEMENT\n\n return : AssignNode, FunctionNode, CallNode, BlockNode or BranchNode.\n \"\"\"\n\n if self.current_token.type == ID: # Will fail when using user-defined functions.\n node = self.assignment()\n self.consume(END_STATEMENT)\n return node\n elif self.current_token.type == FUNCTION:\n node = self.function()\n self.consume(END_STATEMENT)\n return node\n elif self.current_token.type == LBRACKET:\n node = self.block()\n return node\n elif self.current_token.type == CALL:\n node = self.call()\n self.consume(END_STATEMENT)\n return node\n elif self.current_token.type == IF:\n self.consume(IF)\n condition = self.condition()\n self.consume(THEN)\n node = BranchNode(self.statement(), condition)\n while self.current_token.type == ELSE:\n self.consume(ELSE)\n if self.current_token.type == IF:\n node.right = self.statement() # Since token type is 'IF', it'll come back here.\n else:\n node.right = self.statement()\n return node\n elif self.current_token.type == WHILE:\n self.consume(WHILE)\n condition = self.condition()\n self.consume(THEN)\n return WhileLoopNode(condition, self.block())\n\n raise ParserError(self)\n\n def condition(self, previous_condition=None):\n \"\"\"\n condition : expression OPERATOR expression\n | expression [IS] OPERATOR [OR OPERATOR] [THAN|TO] expression\n | expression [IS] NOT [OPERATOR [OR OPERATOR]] [THAN|TO] expression\n | condition ((AND|OR) condition)*\n \"\"\"\n if self.current_token.type in (\n EQUAL, NOT_EQUAL, GREATER_THAN, GREATER_EQUAL_THAN, LESS_THAN, LESS_EQUAL_THAN, NOT, IS) \\\n and previous_condition is not None:\n node = previous_condition\n if isinstance(node.left, NameNode):\n left = node.left\n else:\n raise Exception(\n 'Only left variable can be chained. 
Switch {} with {}.'.format(node.left.value, node.right.name))\n else:\n left = self.expression()\n\n if self.current_token.type == IS:\n self.consume(IS)\n\n operator = self.current_token.type\n self.consume(operator)\n\n if operator == NOT: # Already consumed!\n if self.current_token.type in (\n EQUAL, NOT_EQUAL, GREATER_THAN, GREATER_EQUAL_THAN, LESS_THAN, LESS_EQUAL_THAN):\n operator2 = self.current_token.type\n self.consume(operator2)\n if self.current_token.type == OR:\n self.consume(OR)\n operator3 = self.current_token.type\n self.consume(operator3)\n operator2 = operator_logic[operator2, operator3]\n operator = operator_logic[operator, operator2]\n else:\n operator = NOT_EQUAL\n elif self.current_token.type == OR:\n self.consume(OR)\n operator2 = self.current_token.type\n self.consume(operator2)\n operator = operator_logic[operator, operator2]\n\n if self.current_token.type in (THAN, TO):\n self.consume(self.current_token.type)\n\n right = self.expression()\n\n node = ConditionNode(left, operator, right)\n\n if self.current_token.type in (AND, OR):\n logic_operator = self.current_token.type\n self.consume(logic_operator)\n return ConditionNode(node, logic_operator, self.condition(node))\n\n return node\n\n def call(self):\n \"\"\"\n call : CALL variable [LPARENS expression (COMMA expression)* RPARENS]\n \"\"\"\n self.consume(CALL)\n name = self.current_token.value\n self.consume(ID)\n\n arguments = None\n\n if self.current_token.type == LPARENS:\n self.consume(LPARENS)\n arguments = [self.assignment()]\n while self.current_token.type == COMMA:\n self.consume(COMMA)\n arguments.append(self.assignment())\n self.consume(RPARENS)\n\n return CallNode(name, arguments)\n\n def block(self):\n \"\"\"\n block : LBRACKET (statement)* RBRACKET\n\n return : BlockNode.\n \"\"\"\n self.consume(LBRACKET)\n statements = []\n while self.current_token.type != RBRACKET:\n statements.append(self.statement())\n self.consume(RBRACKET)\n return BlockNode(statements)\n\n def function(self): # Procedure?\n \"\"\"\n function : FUNCTION [arguments]*\n \"\"\"\n token = self.current_token\n self.consume(FUNCTION)\n node = BuiltInFunction(token.value, self.arguments())\n return node\n\n def assignment(self):\n \"\"\"\n assignment : variable DECLARE_ASSIGN expression\n | variable DECLARE_ASSIGN block Procedure.\n | variable DECLARE DATA_TYPE ASSIGN expression\n | variable DECLARE DATA_TYPE Just a declaration\n | variable ASSIGN expression Must be declared first.\n\n returns : AssignNode or DeclarationNode\n \"\"\"\n variable = NameNode(self.current_token.value)\n self.consume(ID)\n\n if self.current_token.type == DECLARE_ASSIGN:\n self.consume(DECLARE_ASSIGN)\n\n if self.current_token.type == LBRACKET:\n return AssignNode(variable, FUNCTION, self.block())\n else:\n return AssignNode(variable, None, self.expression())\n\n elif self.current_token.type == DECLARE:\n self.consume(DECLARE)\n\n if self.current_token.type == LPARENS:\n self.consume(LPARENS)\n arguments = self.parameters()\n self.consume(RPARENS)\n self.consume(ASSIGN)\n body = self.block()\n for arg_assignments in arguments:\n body.statements.insert(0, arg_assignments)\n return AssignNode(variable, FUNCTION, body)\n\n data_type = self.current_token.value\n self.consume(self.current_token.type)\n\n if self.current_token.type == ASSIGN:\n self.consume(ASSIGN)\n return AssignNode(variable, data_type, self.expression())\n else:\n return AssignNode(variable, data_type, NoOperationNode())\n\n elif self.current_token.type == ASSIGN:\n self.consume(ASSIGN)\n 
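 # a bare '=' re-assigns a variable that must already have been declared\n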
return ReAssign(variable, self.expression())\n\n else:\n raise ParserError(self)\n\n def parameters(self):\n arguments = [self.assignment()]\n\n while self.current_token.type == COMMA:\n self.consume(COMMA)\n arguments.append(self.assignment())\n\n return arguments\n\n def arguments(self):\n\n if self.current_token.type == LPARENS:\n self.consume(LPARENS)\n\n arguments = [self.expression()]\n\n while self.current_token.type == COMMA:\n self.consume(COMMA)\n arguments.append(self.expression())\n\n self.consume(RPARENS)\n\n else:\n arguments = [self.expression()]\n\n while self.current_token.type == COMMA:\n self.consume(COMMA)\n arguments.append(self.expression())\n\n return arguments\n\n def expression(self):\n \"\"\"\n expression : term ((ADD | SUB) term)* (INC|DEC)*\n\n returns : DataNode, VariableNode, UnaryOperationNode or BinaryOperationNode.\n \"\"\"\n node = self.term()\n\n while self.current_token.type in (ADD, SUB):\n token = self.current_token\n self.consume(token.type)\n node = BinaryOperationNode(node, token.type, self.term())\n\n while self.current_token.type in (INCREMENT, DECREMENT):\n token = self.current_token\n self.consume(token.type)\n node = UnaryOperationNode(token.type, node)\n\n return node\n\n def term(self):\n \"\"\"\n term : factor ((MUL | INT_DIV | REAL_DIV) factor)* (INC|DEC)*\n\n returns : DataNode, VariableNode, UnaryOperationNode or BinaryOperationNode.\n \"\"\"\n node = self.factor()\n\n while self.current_token.type in (MUL, REAL_DIV, INT_DIV, POW):\n token = self.current_token\n self.consume(token.type)\n node = BinaryOperationNode(node, token.type, self.factor())\n\n while self.current_token.type in (INCREMENT, DECREMENT, SQRT):\n token = self.current_token\n self.consume(token.type)\n node = UnaryOperationNode(token.type, node)\n\n return node\n\n def factor(self):\n \"\"\"\n factor : INTEGER_CONST\n | REAL_CONST\n | variable\n | (NEGATE|SQRT|INC|DEC|PRINT) factor\n | LPAREN expr RPAREN\n | FUNCTION [arguments | (LPARENS arguments RPARENS)]\n\n returns : DataNode, VariableNode, UnaryOperationNode or BinaryOperationNode.\n \"\"\"\n token = self.current_token\n\n if token.type == INTEGER_CONST:\n self.consume(INTEGER_CONST)\n return ConstantNode(token.type, token.value)\n elif token.type == REAL_CONST:\n self.consume(REAL_CONST)\n return ConstantNode(token.type, token.value)\n elif token.type == STRING_CONST:\n self.consume(STRING_CONST)\n return ConstantNode(token.type, token.value)\n elif token.type == ID:\n self.consume(ID)\n return NameNode(token.value)\n elif token.type in (NEGATE, SQRT, INCREMENT, DECREMENT, PRINT):\n self.consume(token.type)\n return UnaryOperationNode(token.type, self.factor())\n elif token.type == LPARENS:\n self.consume(LPARENS)\n node = self.expression() # DataNode, VariableNode, UnaryOperationNode, BinaryOperationNode\n self.consume(RPARENS)\n return node\n elif token.type == FUNCTION:\n name = token.value\n self.consume(FUNCTION)\n if self.current_token.type == LPARENS:\n self.consume(LPARENS)\n node = BuiltInFunction(name, self.arguments())\n if self.current_token.type == RPARENS:\n self.consume(RPARENS)\n return node\n else:\n raise ParserError(self)\n\n def consume(self, token_type):\n if self.current_token.type == token_type:\n self.previous_token = self.current_token\n self.current_token = self.token_stream.get_next_token()\n else:\n raise ParserError(self, 'Got {}, expected {}'.format(self.current_token.type, token_type))\n\n\n# ------------------------------ INTERPRETER ------------------------------\n\n\nclass Interpreter:\n\n def __init__(self, parser):\n self.parser = parser\n self.current_namespace = None\n self.file = None\n\n def visit_error(self, node):\n error_message = 'Could not find method visit_{}.'.format(node.__class__.__name__)\n raise ParserError(self.parser, error_message)\n\n def error(self, node):\n error_message = 'Error in node {}.'.format(node.__class__.__name__.replace('Node', ''))\n raise ParserError(self.parser, error_message)\n\n def visit(self, node):\n method_name = 'visit_' + node.__class__.__name__\n method = getattr(self, method_name, self.visit_error)\n return method(node)\n\n def interpret(self):\n self.file = open('test', 'w')\n self.visit(self.parser.parse())\n\n def visit_BlockNode(self, node):\n\n current_namespace = self.current_namespace\n\n new_namespace = node.namespace\n new_namespace['__global_namespace__'] = current_namespace\n\n self.current_namespace = new_namespace # Use new namespace as current namespace.\n\n for statement in node.statements:\n self.visit(statement)\n\n new_namespace.clear()\n\n self.current_namespace = current_namespace # Restore old namespace.\n\n def visit_BuiltInFunction(self, node):\n if node.name == PRINT:\n return self.file.write('printf({})'.format(' '.join(str(self.visit(argument)) for argument in node.arguments)))\n elif node.name == SUM:\n return self.file.write(' + '.join(str(self.visit(argument)) for argument in node.arguments))\n self.error(node)\n\n def visit_ListNode(self, node):\n pass\n\n def visit_UnaryOperationNode(self, node):\n operation = node.operation\n\n if operation == NEGATE:\n return self.file.write('-{}'.format(self.visit(node.expression)))\n if operation == SQRT:\n return self.file.write('pow({}, 0.5)'.format(self.visit(node.expression)))\n if operation == INCREMENT:\n return self.file.write('{}++'.format(self.visit(node.expression)))\n if operation == DECREMENT:\n return self.file.write('{}--'.format(self.visit(node.expression)))\n\n self.error(node)\n\n def visit_BinaryOperationNode(self, node):\n operation = node.operation\n\n if operation == ADD:\n return self.file.write('{} + {}'.format(self.visit(node.left_expression), self.visit(node.right_expression)))\n if operation == SUB:\n return self.file.write('{} - {}'.format(self.visit(node.left_expression), self.visit(node.right_expression)))\n if operation == MUL:\n return self.file.write('{} * {}'.format(self.visit(node.left_expression), self.visit(node.right_expression)))\n if operation == INT_DIV:\n return self.file.write('{} // {}'.format(self.visit(node.left_expression), self.visit(node.right_expression)))\n if operation == REAL_DIV:\n return self.file.write('{} / {}'.format(self.visit(node.left_expression), self.visit(node.right_expression)))\n if operation == POW:\n return self.file.write('pow({}, {})'.format(self.visit(node.left_expression), self.visit(node.right_expression)))\n\n self.error(node)\n\n def visit_NameNode(self, node):\n namespace = self.current_namespace\n\n while namespace is not None:\n value = namespace.get(node.name)\n if value is not None:\n return value\n else:\n namespace = namespace.get('__global_namespace__')\n\n raise NameError('{} is undefined'.format(node.name))\n\n def visit_ConstantNode(self, node):\n data_type = node.inferred_type\n\n if data_type == INTEGER_CONST:\n return self.file.write('int {}'.format(node.value))\n if data_type == REAL_CONST:\n return self.file.write('float {}'.format(node.value))\n if data_type == STRING_CONST:\n return self.file.write('String {}'.format(node.value))\n\n self.error(node)\n\n def 
visit_AssignNode(self, node):\n\n name = node.name.name # AssignNode.NameNode.name\n data_type = node.type\n\n if name in self.current_namespace:\n print(self.current_namespace)\n raise NameError('{} already defined!'.format(name))\n\n if data_type == FUNCTION:\n self.current_namespace[name] = Atom(FUNCTION, node.expression)\n return\n\n atom = self.visit(node.expression) # NO TYPE-CHECKING AT DECLARATION, ONLY REASSIGNMENT.\n if data_type is None:\n self.current_namespace[name] = Atom(atom.type, atom.value)\n elif atom is None:\n self.current_namespace[name] = Atom(data_type, None)\n else:\n self.current_namespace[name] = Atom(data_type, atom.value)\n\n def visit_ReAssign(self, node):\n name = node.name.name # ReAssignNode.NameNode.name\n\n atom = self.visit(node.name)\n\n new_atom = self.visit(node.expression)\n if atom.type != new_atom.type:\n raise TypeError('{} cannot be assigned to {}'.format(new_atom.type, atom.type))\n else:\n namespace = self.current_namespace\n\n while namespace is not None:\n value = namespace.get(name)\n if value is not None:\n namespace[name].value = new_atom.value\n return\n else:\n namespace = namespace.get('__global_namespace__')\n\n raise NameError('{} is undefined'.format(name))\n\n def visit_CallNode(self, node):\n\n namespace = self.current_namespace\n\n while namespace is not None:\n atom = namespace.get(node.name)\n if atom is not None:\n block = atom.value\n if node.arguments is not None:\n offset = len(node.arguments)\n for i, argument in enumerate(node.arguments):\n block.statements.insert(offset + i, argument)\n return self.visit(block)\n else:\n namespace = namespace.get('__global_namespace__')\n\n raise NameError('{} is undefined'.format(node.name))\n\n def visit_ConditionNode(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n operator = node.operator\n\n if operator == EQUAL:\n self.file.write('{} == {}'.format(left, right))\n elif operator == LESS_THAN:\n self.file.write('{} < {}'.format(left, right))\n elif operator == LESS_EQUAL_THAN:\n self.file.write('{} <= {}'.format(left, right))\n elif operator == GREATER_THAN:\n self.file.write('{} > {}'.format(left, right))\n elif operator == GREATER_EQUAL_THAN:\n self.file.write('{} >= {}'.format(left, right))\n elif operator == NOT_EQUAL:\n self.file.write('{} != {}'.format(left, right))\n elif operator == OR:\n self.file.write('{} || {}'.format(left, right))\n elif operator == AND:\n self.file.write('{} && {}'.format(left, right))\n else:\n self.error(node)\n\n def visit_BranchNode(self, node):\n if self.visit(node.condition):\n self.visit(node.left)\n else:\n self.visit(node.right)\n\n def visit_NoOperationNode(self, node):\n pass\n\n def visit_WhileLoopNode(self, node):\n while self.visit(node.condition):\n self.visit(node.block)\n\n\nimport sys\nimport datetime\n\nsource_path = ''\nif len(sys.argv) >= 2: # Running from the command line with path argument\n source_path = sys.argv[1]\n source_code = open(source_path).read()\n\n start_time = datetime.datetime.now()\n\n print('Running: {} at {}\\n'.format(source_path, start_time))\n\n interpreter = Interpreter(Parser(Tokenizer(InputStream(source_code))))\n interpreter.interpret()\n\n print('\\nFinished in {}.\\n'.format(datetime.datetime.now() - start_time))\n exit()\n\ninterpreter = Interpreter(Parser(Tokenizer(InputStream(open('scripts/test2.ted').read()))))\ninterpreter.interpret()", "sub_path": "compiler.py", "file_name": "compiler.py", "file_ext": "py", "file_size_in_byte": 35216, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "collections.OrderedDict", "line_number": 382, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 1013, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1016, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1016, "usage_type": "attribute"}, {"api_name": "inputstream.InputStream", "line_number": 1020, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1023, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1023, "usage_type": "attribute"}, {"api_name": "inputstream.InputStream", "line_number": 1026, "usage_type": "call"}]} +{"seq_id": "497091866", "text": "from websocket import create_connection\nfrom orderbook_base import OrderbookBase\nfrom threading import Thread\nimport gzip\nimport time\nimport json\nimport logging\n\nclass HuobiOrderbook(OrderbookBase):\n pairs_dict = {'BTC-USD': 'btcusdt', 'BCH-USD': 'bchusdt', 'BCH-BTC': 'bchbtc', 'LTC-BTC': 'ltcbtc',\n 'ETH-BTC': 'ethbtc'}\n inverse_pairs = {'btcusdt': 'BTC-USD', 'bchusdt': 'BCH-USD', 'bchbtc': 'BCH-BTC', 'ltcbtc': 'LTC-BTC',\n 'ethbtc': 'ETH-BTC'}\n\n def __init__(self, asset_pairs, fees, **kwargs):\n super().__init__(asset_pairs, fees)\n self.log = logging.getLogger('smart-trader')\n self._listener_thread = None\n self._listener_ws = None\n self._is_running = False\n self._current_orderbook = {}\n\n def _start(self):\n self.log.debug('start huobi exchange')\n while(True):\n try:\n self._listener_ws = create_connection(\"wss://api.huobipro.com/ws\")\n break\n except Exception as e:\n self.log.error('connect ws error: <%s>, retry...', e)\n time.sleep(5)\n self.log.debug('successfully connected to ws')\n for asset_pair in self._asset_pairs:\n if asset_pair in HuobiOrderbook.pairs_dict:\n pair = HuobiOrderbook.pairs_dict.get(asset_pair)\n self._listener_ws.send(\"\"\"{\"sub\": \"market.\"\"\" + pair + \"\"\".depth.step0\", \"id\": \"id10\"}\"\"\")\n self.log.debug(\"Listening to pair: <%s>\", pair)\n\n self._listener_thread = Thread(target=self.handle_data, daemon=True, name='Listen to Huobi queue')\n self._listener_thread.start()\n\n def _stop(self):\n self.log.info('Stop Huobi excange')\n self._is_running = False\n if self._listener_thread is not None and self._listener_thread.is_alive:\n self._listener_thread.join()\n\n def handle_data(self):\n self._is_running = True\n while(self._is_running):\n compressData = self._listener_ws.recv()\n if len(compressData) == 0:\n self.log.info(\"Restarting connection to Huobi\")\n self._listener_ws = create_connection(\"wss://api.huobipro.com/ws\")\n for asset_pair in self._asset_pairs:\n if asset_pair in HuobiOrderbook.pairs_dict.keys():\n pair = HuobiOrderbook.pairs_dict.get(asset_pair)\n self._listener_ws.send(\"\"\"{\"sub\": \"market.\"\"\" + pair + \"\"\".depth.step0\", \"id\": \"id10\"}\"\"\")\n self.log.debug(\"Listening to pair:\", pair)\n else:\n result = gzip.decompress(compressData).decode('utf-8')\n message = json.loads(result)\n if 'ping' in message:\n ts = message['ping']\n pong = {'pong': ts}\n self._listener_ws.send(str(pong))\n else:\n if 'ch' in message:\n asset_pair = self.inverse_pairs[message['ch'].split('.')[1]]\n self._current_orderbook.update({asset_pair: self.normalize_orderbook(message)})\n\n def normalize_orderbook(self, message):\n orderbook = message.get('tick')\n if orderbook is not None:\n 
ts = message.get('ts')\n normalized_bids = [{'price': bid[0], 'size': bid[1], 'source': 'Huobi'} for bid in orderbook['bids']]\n normalized_asks = [{'price': ask[0], 'size': ask[1], 'source': 'Huobi'} for ask in orderbook['asks']]\n return {'time': ts, 'bids': normalized_bids, 'asks': normalized_asks}\n\n def _get_orderbook_from_exchange(self, asset_pair, size):\n try:\n result = {'asks': [],\n 'bids': []}\n if self._current_orderbook[asset_pair] is not None:\n result = {'asks': self._current_orderbook[asset_pair]['asks'][:size],\n 'bids': self._current_orderbook[asset_pair]['bids'][:size]}\n return result\n except Exception as e:\n return {'bids': [], 'asks': []}\n\n\n\n# if __name__ == '__main__':\n# print ('start process')\n# huobiOrderbook = HuobiOrderbook(['BTC-USD', 'BCH-USD'], 5);\n# huobiOrderbook._start()\n# time.sleep(5)\n# print(huobiOrderbook._get_orderbook_from_exchange('BTC-USD', 3))\n# print(huobiOrderbook._get_orderbook_from_exchange('BCH-USD', 3))\n \n\n\n\n\n# -*- coding: utf-8 -*-\n# author: 半熟的韭菜\n\n# from websocket import create_connection\n# import gzip\n# import time\n\n# if __name__ == '__main__':\n# while(1):\n# try:\n# ws = create_connection(\"wss://api.huobipro.com/ws\")\n# break\n# except:\n# print('connect ws error,retry...')\n# time.sleep(5)\n\n# # Subscribe to KLine data\n# # tradeStr=\"\"\"{\"sub\": \"market.ethusdt.kline.1min\",\"id\": \"id10\"}\"\"\"\n\n# # Request KLine data\n# # tradeStr=\"\"\"{\"req\": \"market.ethusdt.kline.1min\",\"id\": \"id10\", \"from\": 1513391453, \"to\": 1513392453}\"\"\"\n\n# # Subscribe to Market Depth data\n# # tradeStr=\"\"\"{\"sub\": \"market.ethusdt.detail\", \"id\": \"id10\"}\"\"\"\n\n# # Request Market Depth data\n# tradeStr=\"\"\"{\"sub\": \"market.btcusdt.depth.step3\", \"id\": \"id10\"}\"\"\"\n\n# # Subscribe to Trade Detail data\n# # tradeStr=\"\"\"{\"sub\": \"market.ethusdt.trade.detail\", \"id\": \"id10\"}\"\"\"\n\n# # Request Trade Detail data\n# # tradeStr=\"\"\"{\"req\": \"market.ethusdt.trade.detail\", \"id\": \"id10\"}\"\"\"\n\n# # Request Market Detail data\n# # tradeStr=\"\"\"{\"req\": \"market.ethusdt.detail\", \"id\": \"id12\"}\"\"\"\n\n# ws.send(tradeStr)\n# while(1):\n# compressData=ws.recv()\n# result=gzip.decompress(compressData).decode('utf-8')\n# if result[:7] == '{\"ping\"':\n# ts=result[8:21]\n# pong='{\"pong\":'+ts+'}'\n# ws.send(pong)\n# ws.send(tradeStr)\n# else:\n# print(result) \n ", "sub_path": "huobi_orderbook.py", "file_name": "huobi_orderbook.py", "file_ext": "py", "file_size_in_byte": 5934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "orderbook_base.OrderbookBase", "line_number": 9, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "websocket.create_connection", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 39, "usage_type": "call"}, {"api_name": "websocket.create_connection", "line_number": 54, "usage_type": "call"}, {"api_name": "gzip.decompress", "line_number": 61, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "647637922", "text": "import logging\nimport weakref\nimport inspect\n\nfrom osis.model.fields import Field, GUID, String\n\nlogger = logging.getLogger('osis.model')\n\nGUIDField = GUID()\nGUIDField.name = 'guid'\nVersionField = GUID()\nVersionField.name = 'version'\nCreationDateField = String()\nCreationDateField.name = 
'creationdate'\nDEFAULT_FIELDS = (GUIDField, VersionField, CreationDateField, )\n\nclass _OsisModelAttribute(object):\n def __init__(self, name, attribute):\n self.name = name\n self.attribute = attribute\n\nclass _OsisModelInfo(object):\n def __init__(self, name, attrs):\n self.type = None\n self.name = name\n\n self.read_attributes(attrs)\n\n def read_attributes(self, attrs):\n logger.debug('Creating attribute info for %s' % self.name)\n self.attributes = tuple(_OsisModelAttribute(*info) for info in\n attrs.iteritems() if isinstance(info[1], Field))\n\n def __str__(self):\n return 'OSIS model info for %s' % self.name\n\n\nclass ModelMeta(type):\n def __new__(cls, name, bases, attrs, allow_slots=False):\n logger.info('Generating model type %s' % name)\n try:\n Model\n RootObjectModel\n except NameError:\n return type.__new__(cls, name, bases, attrs)\n\n if not allow_slots and '__slots__' in attrs:\n raise RuntimeError(\n 'Don\\'t set a \\'__slots__\\' value on model classes')\n\n assert 'guid' not in attrs, \\\n 'Model classes should have no explicit \\'guid\\' attribute'\n assert 'version' not in attrs, \\\n 'Model classes should have no explicit \\'version\\' attribute'\n assert 'creationdate' not in attrs, \\\n 'Model classes should have no explicit ' \\\n '\\'creationdate\\' attribute'\n\n for field in DEFAULT_FIELDS:\n attrs[field.name] = field\n\n attrs['OSIS_MODEL_INFO'] = _OsisModelInfo(name, attrs)\n\n for attr_name, attr in attrs.iteritems():\n if isinstance(attr, Field) and attr not in DEFAULT_FIELDS:\n attr.name = attr_name\n\n import osis.model\n extra_bases = set(bases).difference(\n set((osis.model.Model, osis.model.RootObjectModel, )))\n if extra_bases:\n raise RuntimeError(\n 'A model should only inherit from Model or RootObjectModel, '\n 'not %s' % repr([base.__name__ for base in extra_bases]))\n\n for base in bases:\n if not hasattr(base, '__slots__'):\n raise RuntimeError('Base class %s has no __slots__ defined' %\n base.__name__)\n\n # Calculate and set __slots__ - see 'Datamodel' in the Python\n # language reference\n slots = ['_osis_store', ]\n for attrname, attr in attrs.iteritems():\n if isinstance(attr, Field):\n slots.append(attrname)\n attrs['__slots__'] = tuple(slots)\n\n type_ = type.__new__(cls, name, bases, attrs)\n # Do we actually need this?\n type_.OSIS_MODEL_INFO.type = weakref.proxy(type_)\n\n # Perform one more __slots__ check, just to be sure (other metaclasses\n # might fool us)\n for base in inspect.getmro(type_):\n if base is not object and not hasattr(base, '__slots__'):\n raise RuntimeError('Base class %s has no __slots__ defined' % \\\n base.__name__)\n\n return type_\n\n\nclass Model(object):\n __metaclass__ = ModelMeta\n __slots__ = ('_osis_store', )\n\n # Make PyLint happy, set by metaclass\n OSIS_MODEL_INFO = None\n\n def __init__(self, **kwargs):\n self._osis_store = dict()\n\n attribute_names = set(attr.name for attr in\n self.OSIS_MODEL_INFO.attributes)\n\n for key, value in kwargs.iteritems():\n if key not in attribute_names:\n raise ValueError('Unknown attribute %s' % key)\n\n setattr(self, key, value)\n\n def __str__(self):\n d = dict()\n for attr in self.OSIS_MODEL_INFO.attributes:\n d[attr.name] = getattr(self, attr.name)\n\n return str(d)\n\n def __eq__(self, other):\n if self is other:\n return True\n\n if not type(self) is type(other):\n return NotImplemented\n\n if not self.version or not self.guid:\n return False\n\n return self.guid == other.guid and self.version == other.version\n\n def __ne__(self, other):\n return not 
self.__eq__(other)\n\n def __hash__(self):\n if not self.version:\n return hash(self.guid) if self.guid else object.__hash__(self)\n\n return hash((self.guid, self.version, ))\n\n\nclass RootObjectModel(Model):\n __slots__ = tuple()\n\n def serialize(self, serializer):\n return serializer.serialize(self)\n\n @classmethod\n def deserialize(cls, deserializer, data):\n return deserializer.deserialize(cls, data)\n", "sub_path": "code/osis/model/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 4960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "osis.model.fields.GUID", "line_number": 9, "usage_type": "call"}, {"api_name": "osis.model.fields.GUID", "line_number": 11, "usage_type": "call"}, {"api_name": "osis.model.fields.String", "line_number": 13, "usage_type": "call"}, {"api_name": "osis.model.fields.Field", "line_number": 32, "usage_type": "argument"}, {"api_name": "osis.model.fields.Field", "line_number": 65, "usage_type": "argument"}, {"api_name": "osis.model.fields.model", "line_number": 70, "usage_type": "attribute"}, {"api_name": "osis.model.fields", "line_number": 70, "usage_type": "name"}, {"api_name": "osis.model.fields.Field", "line_number": 85, "usage_type": "argument"}, {"api_name": "weakref.proxy", "line_number": 91, "usage_type": "call"}, {"api_name": "inspect.getmro", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "302008113", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nfunctions to make mouse gene expression queries and manipulations\n\"\"\"\nimport requests\nfrom xml.etree import ElementTree as ET\n\n# specify RMA query model, restrain the results to mouse\n# and to the genes that have section data sets available\nURL_PREFIX = \"http://api.brain-map.org/api/v2/data/\" \\\n \"SectionDataSet/query.xml?\" \\\n \"criteria=products[id$eq1],\"\n# information to include\nURL_INCLUDE = \"&include=genes,plane_of_section\"\n\n# #an alternative to make the query, but has no type info\n# URL_PREFIX = \"http://api.brain-map.org/api/v2/data/\" \\\n# \"query.xml?include=model::Gene\"\n# #restrain the queries to mouse (products ID=1)\n# URL_INCLUDE = \",products[id$eq1]\"\n\nGENE_ENTRY_TYPES = [\n 'acronym',\n 'chromosome-id',\n 'ensembl-id',\n 'entrez-id',\n 'homologene-id',\n 'id',\n 'legacy-ensembl-gene-id',\n 'name',\n 'original-name',\n 'original-symbol',\n 'sphinx-id',\n]\n\n# the attributes of gene query\nGENE_ATTRIBUTES = [\n 'acronym',\n 'alias-tags',\n 'chromosome-id',\n 'ensembl-id',\n 'entrez-id',\n 'genomic-reference-update-id',\n 'homologene-id',\n 'id',\n 'legacy-ensembl-gene-id',\n 'name',\n 'organism-id',\n 'original-name',\n 'original-symbol',\n 'reference-genome-id',\n 'sphinx-id',\n 'version-status'\n]\n\n\ndef check_gene_validity(gene_id=None, gene_acronym=None, gene_name=None):\n \"\"\"\n check if a gene is valid or has records in the database.\n\n Parameters\n ----------\n gene_id : int, optional\n gene ID\n gene_acronym : str, optional\n gene acronym (case sensitive)\n gene_name : str, optional\n gene name (case sensitive)\n\n Returns\n -------\n validity : boolean\n if the gene has records in the database\n root : :obj:`Response`\n empty if query fails\n\n Raises\n ------\n TypeError\n if missing parameters\n\n Example\n -------\n >>> # check if gene ID 18376 is valid\n >>> validity, _ = check_gene_validity(gene_id=18376)\n >>> validity\n True\n >>> # check if gene Pdyn 
is valid\n >>> validity, root = check_gene_validity(gene_acronym='Pdyn')\n\n \"\"\"\n # if gene ID is given\n # preferred: id > acronym > name\n if gene_id is not None:\n query_url = URL_PREFIX + \\\n \"genes[id$eq{}]\".format(gene_id) + \\\n URL_INCLUDE\n elif gene_acronym is not None:\n query_url = URL_PREFIX + \\\n \"genes[acronym$eq'{}']\".format(gene_acronym) + \\\n URL_INCLUDE\n elif gene_name is not None:\n query_url = URL_PREFIX + \\\n \"genes[name$eq'{}']\".format(gene_name) + \\\n URL_INCLUDE\n else:\n raise TypeError(\n \"at least one gene identifier should be specified\"\n )\n # make the query\n print(\"access {}...\".format(query_url))\n r = requests.get(query_url)\n root = ET.fromstring(r.content)\n\n if root.attrib['total_rows'] != '0': # successful\n return True, root\n else:\n return False, root\n\n\ndef get_gene_info(\n gene_id=None,\n gene_acronym=None,\n gene_name=None,\n attributes='all'\n):\n \"\"\"\n get attributes of a gene.\n\n # multiple attributes\n gene_info = get_gene_info(\n gene_id=18376, attributes=['acronym', 'name']\n )\n # gene name\n gene_info['name']\n\n Parameters\n ----------\n gene_id : int, optional\n gene ID\n gene_acronym : str, optional\n gene acronym (case sensitive)\n gene_name : str, optional\n gene name (case sensitive)\n attributes : str or list, optional\n a single attribute or a list of attributes\n default: 'all', returning all the attributes\n available attributes:\n 'acronym',\n 'alias-tags',\n 'chromosome-id',\n 'ensembl-id',\n 'entrez-id',\n 'genomic-reference-update-id',\n 'homologene-id',\n 'id',\n 'legacy-ensembl-gene-id',\n 'name',\n 'organism-id',\n 'original-name',\n 'original-symbol',\n 'reference-genome-id',\n 'sphinx-id',\n 'version-status'\n\n Returns\n -------\n gene_info : int, str or dict\n if a single attribute is given, return an int or str\n if multiple attributes are given, return a dict\n {attr:value}. attr is str (attribute) and value is str or int\n\n Raises\n ------\n ValueError\n the gene given is invalid\n AttributeError\n only one attribute is given, and it is invalid\n\n Examples\n --------\n >>> # get gene name according to gene acronym 'Pdyn'\n >>> get_gene_info(gene_acronym='Pdyn', attributes='name')\n 'prodynorphin'\n >>> # get gene acronym according to gene id 18376\n >>> get_gene_info(gene_id=18376, attributes='acronym')\n 'Pdyn'\n\n \"\"\"\n validity, root = check_gene_validity(\n gene_id=gene_id,\n gene_acronym=gene_acronym,\n gene_name=gene_name\n )\n if validity is False:\n raise ValueError(\n 'Gene {} is invalid. Try another gene.'\n .format(\n [\n item for item in [gene_id, gene_acronym, gene_name]\n if item is not None\n ][0]\n )\n )\n\n # if the query was successful\n if attributes == 'all':\n attr_list = GENE_ATTRIBUTES\n else:\n attr_list = attributes\n\n if isinstance(attr_list, list):\n gene_info = dict()\n for attr in attr_list:\n try:\n # extract the info of attr\n gene_info[attr] = _get_single_gene_attribute(\n root, attr\n )\n except AttributeError:\n print('There is no attribute called {}. 
'\n 'Skipped.'.format(attr))\n continue\n\n else: # single attribute is given\n # return the single attr value, or raise AttributeError\n return _get_single_gene_attribute(root, attributes)\n\n return gene_info\n\n\ndef _get_single_gene_attribute(root, attr):\n \"\"\"\n return a single attribute.\n\n Parameters\n ----------\n root : :obj:`Response`\n attr : str\n\n Returns\n -------\n int, str or None\n\n Raises\n ------\n AttributeError\n if attr doesn't exist\n\n \"\"\"\n item = root.findall(\n 'section-data-sets/section-data-set/'\n 'genes/gene/{}'.format(attr)\n )\n # check if attr is valid (if any information is found)\n if len(item) == 0:\n raise AttributeError(\n 'There is no gene attribute called {}'.format(attr)\n )\n\n # check data type\n # attr is an integer\n try:\n return int(item[0].text)\n except ValueError:\n return item[0].text\n except TypeError:\n # the attribute exists, but has no value\n return None\n", "sub_path": "abagen/mouse/gene.py", "file_name": "gene.py", "file_ext": "py", "file_size_in_byte": 6919, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "requests.get", "line_number": 112, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 113, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 113, "usage_type": "name"}]} +{"seq_id": "482341567", "text": "# NtDocutils https://ntrrg.github.io/NtDocutils/\n# Copyright (c) 2017 Miguel Angel Rivera Notararigo\n# Licensed under The MIT License. See LICENSE file for full licensing details.\n\nfrom setuptools import setup, find_packages\nfrom os import path\n\nfrom ntdocutils import __version__, DESCRIPTION\n\nbasedir = path.abspath(path.dirname(__file__))\n\nwith open(path.join(basedir, \"README.rst\"), encoding=\"utf-8\") as readme:\n long_description = readme.read()\n\nsetup(\n name=\"NtDocutils\",\n version=__version__,\n description=DESCRIPTION,\n long_description=long_description,\n url=\"https://ntrrg.github.io/NtDocutils/\",\n author=\"Miguel Angel Rivera Notararigo\",\n author_email=\"ntrrgx@gmail.com\",\n license=\"MIT\",\n\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Other Audience\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Documentation\",\n \"Topic :: Software Development :: Documentation\",\n \"Topic :: Text Processing\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\"\n ],\n\n keywords=\"docutils restructuredtext docutils-theme documentation\",\n packages=find_packages(),\n install_requires=[\"docutils==0.13.1\", \"Pygments==2.2.0\"],\n include_package_data=True,\n\n entry_points={\n \"console_scripts\": [\n \"ntdocutils = ntdocutils.cmdline:main\",\n ]\n }\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "ntdocutils.__version__", "line_number": 17, "usage_type": "name"}, {"api_name": "ntdocutils.DESCRIPTION", "line_number": 18, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "603607431", "text": "import math\nimport matplotlib.pyplot as plt\nimport numpy\nimport time\nimport typing as tp\n\nfrom cocos.numerics.numerical_package_bundle import (\n NumericalPackageBundle,\n CocosBundle\n)\n\nfrom cocos.numerics.random import randn_antithetic\nfrom cocos.device import (\n ComputeDeviceManager,\n info,\n sync\n)\n\nfrom cocos.multi_processing.device_pool import ComputeDevicePool\n\n\ndef simulate_heston_model(\n T: float,\n N: int,\n R: int,\n mu: float,\n kappa: float,\n v_bar: float,\n sigma_v: float,\n rho: float,\n x0: float,\n v0: float,\n numerical_package_bundle: tp.Type[NumericalPackageBundle]) \\\n -> tp.Tuple:\n \"\"\"\n This function simulates R paths from the Heston stochastic volatility model\n over a time horizon of length T divided into N steps.\n\n :param T: time horizon of the simulation\n :param N: number of steps\n :param R: number of paths to simulate\n :param mu: expected return\n :param kappa: mean-reversion speed of volatility\n :param v_bar: long-run mean of volatility\n :param sigma_v: volatility of volatility\n :param rho: instantaneous correlation of shocks to price and to volatility\n :param x0: initial log price\n :param v0: initial volatility\n :param gpu: whether to compute on the GPU\n :return: a tuple of two R-dimensional numeric arrays for log price and\n volatility\n \"\"\"\n np = numerical_package_bundle.module()\n random = numerical_package_bundle.random_module()\n\n Delta_t = T / float(N - 1)\n\n x = [np.full((R,), x0, dtype=numpy.float32),\n np.zeros((R,), dtype=numpy.float32)]\n\n v = [np.full((R,), v0, dtype=numpy.float32),\n np.zeros((R,), dtype=numpy.float32)]\n\n sqrt_delta_t = math.sqrt(Delta_t)\n sqrt_one_minus_rho_square = math.sqrt(1 - rho ** 2)\n\n m = np.zeros((2,), dtype=numpy.float32)\n m[0] = rho\n m[1] = sqrt_one_minus_rho_square\n\n t_current = 0\n for t in range(1, N):\n t_previous = (t + 1) % 2\n t_current = t % 2\n\n # generate antithetic standard normal random variables\n dBt = randn_antithetic(shape=(R, 2),\n antithetic_dimension=0,\n num_pack=np) * sqrt_delta_t\n\n sqrt_v_lag = np.sqrt(v[t_previous])\n x[t_current] = x[t_previous] \\\n + (mu - 0.5 * v[t_previous]) * Delta_t \\\n + np.multiply(sqrt_v_lag, dBt[:, 0])\n v[t_current] = v[t_previous] \\\n + kappa * (v_bar - v[t_previous]) * Delta_t \\\n + sigma_v * np.multiply(sqrt_v_lag, np.dot(dBt, m))\n v[t_current] = np.maximum(v[t_current], numpy.finfo(numpy.float32).eps)\n\n x = x[t_current]\n v = np.maximum(v[t_current], numpy.finfo(numpy.float32).eps)\n\n return x, v\n\n\ndef compute_option_price_from_simulated_paths(\n r: float,\n T: float,\n K: float,\n x_simulated,\n numerical_package_bundle: tp.Type[NumericalPackageBundle]) -> float:\n \"\"\"\n Compute the function of a plain-vanilla call option from simulated\n log-returns.\n\n :param r: the risk-free rate\n :param T: the time to expiration\n :param K: the strike price\n :param x_simulated: a numeric array of simulated log prices of the underlying\n :param numerical_package_bundle: a class implementing NumericalPackageBundle\n :return: option price\n \"\"\"\n\n num_pack = 
numerical_package_bundle.module()\n\n return float(math.exp(-r * T) * num_pack.mean(num_pack.maximum(num_pack.exp(x_simulated) - K, 0)))\n\n\ndef simulate_and_compute_option_price(\n x0: float,\n v0: float,\n r: float,\n rho: float,\n sigma_v: float,\n kappa: float,\n v_bar: float,\n T: float,\n K: float,\n nT: int,\n R: int,\n numerical_package_bundle: tp.Type[NumericalPackageBundle]) -> float:\n\n print(f'computing on device={ComputeDeviceManager.get_current_compute_device_id()}')\n\n # simulate random paths\n (x_simulated, v_simulated) \\\n = simulate_heston_model(\n T=T,\n N=nT,\n R=R,\n mu=r,\n kappa=kappa,\n v_bar=v_bar,\n sigma_v=sigma_v,\n rho=rho,\n x0=x0,\n v0=v0,\n numerical_package_bundle=numerical_package_bundle)\n\n # compute option price\n option_price \\\n = compute_option_price_from_simulated_paths(\n r=r,\n T=T,\n K=K,\n x_simulated=x_simulated,\n numerical_package_bundle=numerical_package_bundle)\n\n return option_price\n\n\ndef simulate_and_compute_option_price_gpu(\n x0: float,\n v0: float,\n r: float,\n rho: float,\n sigma_v: float,\n kappa: float,\n v_bar: float,\n T: float,\n K: float,\n nT: int,\n R: int,\n gpu_pool: tp.Optional[ComputeDevicePool] = None,\n number_of_batches: tp.Optional[int] = None) -> float:\n numerical_package_bundle = CocosBundle\n if number_of_batches is None:\n if gpu_pool is None:\n number_of_batches = 1\n else:\n number_of_batches = gpu_pool.number_of_devices\n\n kwargs = \\\n dict(x0=x0,\n v0=v0,\n r=r,\n rho=rho,\n sigma_v=sigma_v,\n kappa=kappa,\n v_bar=v_bar,\n T=T,\n K=K,\n nT=nT,\n numerical_package_bundle=numerical_package_bundle)\n\n if gpu_pool is None:\n kwargs['R'] = R\n print(f'computing {kwargs[\"R\"]} paths on single GPU')\n\n option_price = \\\n simulate_and_compute_option_price(\n **kwargs)\n\n else:\n kwargs['R'] = math.ceil(R / number_of_batches)\n print(f'computing {R} paths on {gpu_pool.number_of_devices} GPUs in '\n f'{number_of_batches} batches of {kwargs[\"R\"]} paths')\n\n option_price = \\\n gpu_pool.map_reduce(f=simulate_and_compute_option_price,\n reduction=lambda x, y: x + y / number_of_batches,\n initial_value=0.0,\n kwargs_list=number_of_batches * [kwargs])\n\n return option_price\n\n\ndef create_result_table(number_of_devices_to_runtime_map: tp.Dict[int, float]) \\\n -> str:\n res = \"<table>\\n\"\n res += \"<tbody>\\n\"\n res += \"<tr>\\n\"\n res += \"<th>Number of GPUs</th>\\n\"\n res += \"<th>Total Time in Seconds</th>\\n\"\n res += \"<th>Speedup Compared to Single GPU</th>\\n\"\n res += \"</tr>\\n\"\n\n for number_of_devices, runtime \\\n in number_of_devices_to_runtime_map.items():\n res += \"<tr>\\n\"\n res += f\"<td>{number_of_devices}</td>\\n\"\n res += f\"<td>{runtime}</td>\\n\"\n res += f\"<td>{number_of_devices_to_runtime_map[1] / runtime}</td>\\n\"\n res += \"</tr>\\n\"\n\n res += \"</tbody>\\n</table>
    \"\n\n return res\n\n\ndef create_bar_plot(number_of_devices_to_runtime_map: tp.Dict[int, float]):\n objects = []\n performance = []\n\n for number_of_devices, runtime \\\n in number_of_devices_to_runtime_map.items():\n objects.append(number_of_devices)\n performance.append(number_of_devices_to_runtime_map[1] / runtime)\n\n y_pos = numpy.arange(len(objects))\n\n plt.figure(1)\n plt.bar(y_pos, performance, align='center', alpha=0.5)\n plt.xticks(y_pos, objects)\n plt.ylabel('Speedup Factor')\n plt.title('Performance Relative to a Single GPU \\n'\n 'in Monte Carlo Simulation of Heston Model \\n')\n\n plt.savefig(f'heston_pricing_benchmark_results_multi_gpu')\n\n plt.show()\n\n\nif __name__ == '__main__':\n info()\n\n gpu_pool = ComputeDevicePool()\n\n # model parameters\n x0 = 0.0 # initial log stock price\n v0 = 0.101 ** 2 # initial volatility\n r = math.log(1.0319) # risk-free rate\n rho = -0.7 # instantaneous correlation between Brownian motions\n sigma_v = 0.61 # variance of volatility\n kappa = 6.21 # mean reversion speed\n v_bar = 0.019 # mean variance\n\n # option parameters\n T = 1.0 # time to expiration\n K = 0.95 # strike price\n\n # simulation parameters\n nT = int(math.ceil(500 * T)) # number of time-steps to simulate\n\n # warm-up\n R = 20000 # actual number of paths to simulate for pricing\n\n kwargs = \\\n dict(x0=x0,\n v0=v0,\n r=r,\n rho=rho,\n sigma_v=sigma_v,\n kappa=kappa,\n v_bar=v_bar,\n T=T,\n K=K,\n nT=nT,\n R=R)\n\n print('warm-up')\n # tic = time.time()\n option_price = simulate_and_compute_option_price_gpu(gpu_pool=gpu_pool,\n **kwargs)\n # toc = time.time() - tic\n # print(f'option price = {option_price} computed in {toc} seconds')\n print('warmed up')\n\n # actual run\n R = 2000000 # actual number of paths to simulate for pricing\n\n kwargs = \\\n dict(x0=x0,\n v0=v0,\n r=r,\n rho=rho,\n sigma_v=sigma_v,\n kappa=kappa,\n v_bar=v_bar,\n T=T,\n K=K,\n nT=nT,\n R=R)\n\n number_of_devices_to_runtime_map = {}\n\n for i in range(1, gpu_pool.number_of_devices + 1):\n print(f'computing on {i} GPUs')\n tic = time.time()\n option_price = \\\n simulate_and_compute_option_price_gpu(gpu_pool=gpu_pool,\n number_of_batches=i,\n **kwargs)\n sync()\n gpu_time = time.time() - tic\n print(f'option price = {option_price} computed on {i} GPUs in '\n f'{gpu_time} seconds')\n\n number_of_devices_to_runtime_map[i] = gpu_time\n\n if gpu_pool.number_of_devices > 1:\n for i in range(2, gpu_pool.number_of_devices + 1):\n print(f'Performance on {i} GPUs increased by a factor of'\n f' {number_of_devices_to_runtime_map[1] / number_of_devices_to_runtime_map[i]} '\n f'over a single GPU.')\n\n result_table = create_result_table(number_of_devices_to_runtime_map)\n print(result_table)\n\n create_bar_plot(number_of_devices_to_runtime_map)\n", "sub_path": "examples/stochastic_volatility/heston_pricing_multi_gpu_example.py", "file_name": "heston_pricing_multi_gpu_example.py", "file_ext": "py", "file_size_in_byte": 10288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "typing.Type", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cocos.numerics.numerical_package_bundle.NumericalPackageBundle", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 62, 
"usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cocos.numerics.random.randn_antithetic", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.finfo", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 34, "usage_type": "attribute"}, {"api_name": "typing.Type", "line_number": 101, "usage_type": "attribute"}, {"api_name": "cocos.numerics.numerical_package_bundle.NumericalPackageBundle", "line_number": 101, "usage_type": "name"}, {"api_name": "math.exp", "line_number": 116, "usage_type": "call"}, {"api_name": "typing.Type", "line_number": 131, "usage_type": "attribute"}, {"api_name": "cocos.numerics.numerical_package_bundle.NumericalPackageBundle", "line_number": 131, "usage_type": "name"}, {"api_name": "cocos.device.ComputeDeviceManager.get_current_compute_device_id", "line_number": 133, "usage_type": "call"}, {"api_name": "cocos.device.ComputeDeviceManager", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 174, "usage_type": "attribute"}, {"api_name": "cocos.multi_processing.device_pool.ComputeDevicePool", "line_number": 174, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 175, "usage_type": "attribute"}, {"api_name": "cocos.numerics.numerical_package_bundle.CocosBundle", "line_number": 176, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 205, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 218, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 241, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "cocos.device.info", "line_number": 265, "usage_type": "call"}, {"api_name": "cocos.multi_processing.device_pool.ComputeDevicePool", "line_number": 267, "usage_type": "call"}, {"api_name": "math.log", "line_number": 272, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 283, "usage_type": "call"}, {"api_name": "time.time", "line_number": 
329, "usage_type": "call"}, {"api_name": "cocos.device.sync", "line_number": 334, "usage_type": "call"}, {"api_name": "time.time", "line_number": 335, "usage_type": "call"}]} +{"seq_id": "316147713", "text": "\n\n# Create your views here.\n\nfrom django.views import generic\nfrom django.views.generic.list import ListView\nfrom django.views.generic import DetailView\nfrom django.shortcuts import render, redirect,HttpResponse\n\nfrom .forms import DoctorForm,uploadForm\nfrom .models import Appointment,Doctor,Patient,Prescription,Order,Cart,Property\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import View\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.core.mail import EmailMultiAlternatives\nimport razorpay\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponseBadRequest\nfrom io import BytesIO\nimport datetime\nimport time\nfrom django.contrib.auth.models import User\nimport smtplib,ssl\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\nimport os\n\n\nrazorpay_client = razorpay.Client(\n\tauth=(settings.RAZOR_KEY_ID, settings.RAZOR_KEY_SECRET))\n\n \n \ndef link(request):\n a= User.objects.get(username = request.user)\n order_list = list(Order.objects.all().filter(user = a))\n \n current_meeting = None\n\n for order_db in order_list:\n docname = order_db.docname\n username= order_db.user.username\n doctor= Doctor.objects.get(username=docname)\n meeting = doctor.meeting_link\n start_time =order_db.datetime_start\n end_time = order_db.datetime_end\n # print(start_time)\n # print(end_time)\n #print(start_time.timestamp())\n #print(end_time.timestamp())\n # print(datetime.datetime.now().timestamp())\n now = datetime.datetime.now() # current date and time\n\n\n year = int(start_time.strftime(\"%Y\"))\n print(\"year:\", year)\n \n month = int(start_time.strftime(\"%m\"))\n print(\"month:\", month)\n \n day = int(start_time.strftime(\"%d\"))\n print(\"day:\", day)\n\n h = int(start_time.strftime(\"%H\"))\n print(\"HOUR\", h)\n \n min = int(start_time.strftime(\"%M\"))\n print(\"MIN:\", min)\n \n sec = int(start_time.strftime(\"%S\"))\n print(\"sec\", sec)\n \n \n start_time = datetime.datetime(year,month,day, h ,min,sec)\n\n\n\n year = int(end_time.strftime(\"%Y\"))\n print(\"year:\", year)\n \n month = int(end_time.strftime(\"%m\"))\n print(\"month:\", month)\n \n day = int(end_time.strftime(\"%d\"))\n print(\"day:\", day)\n\n h = int(end_time.strftime(\"%H\"))\n print(\"HOUR\", h)\n \n min = int(end_time.strftime(\"%M\"))\n print(\"MIN:\", min)\n \n sec = int(end_time.strftime(\"%S\"))\n print(\"sec\", sec)\n \n \n end_time = datetime.datetime(year,month,day, h ,min,sec)\n\n\n\n\n year = int(now.strftime(\"%Y\"))\n print(\"year:\", year)\n \n month = int(now.strftime(\"%m\"))\n print(\"month:\", month)\n \n day = int(now.strftime(\"%d\"))\n print(\"day:\", day)\n\n h = int(now.strftime(\"%H\"))\n print(\"HOUR\", h)\n \n min = int(now.strftime(\"%M\"))\n print(\"MIN:\", min)\n \n sec = int(now.strftime(\"%S\"))\n print(\"sec\", sec)\n \n \n datecur = datetime.datetime(year,month,day, h ,min,sec)\n print(datecur)\n \n print(datecur.timestamp())\n print(start_time.timestamp())\n print(end_time.timestamp())\n\n if ((datecur.timestamp()< start_time.timestamp()) or 
(datecur.timestamp()>end_time.timestamp())):\n continue\n \n current_meeting = meeting\n print(current_meeting)\n if current_meeting == None:\n return render(request, 'nomeetings.html')\n else:\n return render(request, 'linktoclick.html', {'obj': current_meeting})\n\n \n \n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n@login_required\ndef appointment(request,user_name):\n doctor5 = get_object_or_404(Doctor, username=user_name)\n \n\n if request.method=='POST':\n \n \n \n patientname= request.POST.get('name')\n patientage= request.POST.get('age')\n\n if not Patient.objects.filter(username=request.user.username).exists():\n PatientProfile= Patient.objects.create(username = request.user.username,name =patientname,age=patientage)\n PatientProfile.save()\n \n irr = request.POST.get('options')\n print(\"edfregdvredgggw\",irr)\n \n \n if irr=='11':\n date=datetime.datetime.today()\n timeslot='09:00-10:00'\n if irr=='12':\n date=datetime.datetime.today()\n timeslot='10:00-11:00'\n if irr=='13':\n date=datetime.datetime.today()\n timeslot='11:00-12:00'\n if irr=='14':\n date=datetime.datetime.today()\n timeslot='12:00-13:00'\n if irr=='21':\n date= datetime.datetime.today() + datetime.timedelta(days=1)\n timeslot='09:00-10:00'\n if irr=='22':\n date= datetime.datetime.today() + datetime.timedelta(days=1)\n timeslot='10:00-11:00'\n if irr=='23':\n date= datetime.datetime.today() + datetime.timedelta(days=1)\n timeslot='11:00-12:00'\n if irr=='24':\n date= datetime.datetime.today() + datetime.timedelta(days=1)\n timeslot='12:00-13:00' \n\n if irr=='31':\n date= datetime.datetime.today() + datetime.timedelta(days=2)\n timeslot='09:00-10:00'\n if irr=='32':\n date= datetime.datetime.today() + datetime.timedelta(days=2)\n timeslot='10:00-11:00'\n if irr=='33':\n date= datetime.datetime.today() + datetime.timedelta(days=2)\n timeslot='11:00-12:00'\n if irr=='34':\n date= datetime.datetime.today() + datetime.timedelta(days=2)\n timeslot='12:00-13:00' \n\n if irr=='41':\n date= datetime.datetime.today() + datetime.timedelta(days=3)\n timeslot='09:00-10:00'\n if irr=='42':\n date= datetime.datetime.today() + datetime.timedelta(days=3)\n timeslot='10:00-11:00'\n if irr=='43':\n date= datetime.datetime.today() + datetime.timedelta(days=3)\n timeslot='11:00-12:00'\n if irr=='44':\n date= datetime.datetime.today() + datetime.timedelta(days=3)\n timeslot='12:00-13:00' \n\n if irr=='51':\n date= datetime.datetime.today() + datetime.timedelta(days=4)\n timeslot='09:00-10:00'\n if irr=='52':\n date= datetime.datetime.today() + datetime.timedelta(days=4)\n timeslot='10:00-11:00'\n if irr=='53':\n date= datetime.datetime.today() + datetime.timedelta(days=4)\n timeslot='11:00-12:00'\n if irr=='54':\n date= datetime.datetime.today() + datetime.timedelta(days=4)\n timeslot='12:00-13:00' \n \n\n post= Appointment(patient_name = patientname,patient_age=patientage,user=request.user,ready_for_payment = True,doctor_name = user_name,date=date,timeslot=timeslot)\n \n post.save()\n\n \n\n\n year = int(post.date.strftime(\"%Y\"))\n # print(year)\n # print(type(year),\"fgt\")\n month = int(post.date.strftime(\"%m\"))\n day = int(post.date.strftime(\"%d\"))\n #print(\"jgvuuuuuuu\",post.datetime_start)\n \n if post.timeslot == '09:00-10:00':\n post.datetime_start=datetime.datetime(year,month,day, 9 ,0,0)\n post.datetime_end=datetime.datetime(year,month,day, 10 ,0,0)\n if post.timeslot == '10:00-11:00':\n post.datetime_start=datetime.datetime(year,month,day, 10 ,0,0)\n post.datetime_end=datetime.datetime(year,month,day, 11 ,0,0)\n if 
post.timeslot == '11:00-12:00':\n post.datetime_start=datetime.datetime(year,month,day, 11 ,0,0)\n post.datetime_end=datetime.datetime(year,month,day, 12 ,0,0)\n if post.timeslot == '12:00-13:00':\n post.datetime_start=datetime.datetime(year,month,day, 12 ,0,0)\n post.datetime_end=datetime.datetime(year,month,day, 13 ,0,0) \n post.save()\n \n datetime_start = post.datetime_start\n datetime_end = post.datetime_end\n # print(\"jgv\",post.datetime_start)\n price_obj=Doctor.objects.get(username=user_name)\n price=price_obj.doc_price\n if Cart.objects.filter(username = request.user.username).first():\n Cart.objects.filter(username = request.user.username).update(username=request.user.username,docname=user_name,price=price,datetime_start=datetime_start,datetime_end=datetime_end)\n else:\n Cart.objects.create(username=request.user.username,docname=user_name,price=price,datetime_start=datetime_start,datetime_end=datetime_end )\n\n order_db = Order.objects.filter(docname=user_name).filter(datetime_start= datetime_start ).count() \n if order_db== 5:\n return render(request, 'fail.html')\n \n return redirect('/doctor/indexpay/')\n \n \n \n \n else:\n context={}\n \n obj1 = datetime.datetime.today()\n obj2 = datetime.datetime.today() + datetime.timedelta(days=1)\n obj3 = datetime.datetime.today() + datetime.timedelta(days=2)\n obj4 = datetime.datetime.today() + datetime.timedelta(days=3)\n obj5 = datetime.datetime.today() + datetime.timedelta(days=4)\n obj6 = datetime.datetime.today() + datetime.timedelta(days=5)\n \n \n\n \n \n year1 = int(obj1.strftime(\"%Y\"))\n # print(year)\n # print(type(year),\"fgt\")\n month1 = int(obj1.strftime(\"%m\"))\n day1 = int(obj1.strftime(\"%d\"))\n\n year2 = int(obj2.strftime(\"%Y\"))\n # print(year)\n # print(type(year),\"fgt\")\n month2 = int(obj2.strftime(\"%m\"))\n day2 = int(obj2.strftime(\"%d\"))\n\n year3 = int(obj3.strftime(\"%Y\"))\n # print(year)\n # print(type(year),\"fgt\")\n month3 = int(obj3.strftime(\"%m\"))\n day3 = int(obj3.strftime(\"%d\"))\n \n year4 = int(obj4.strftime(\"%Y\"))\n # print(year)\n # print(type(year),\"fgt\")\n month4 = int(obj4.strftime(\"%m\"))\n day4 = int(obj4.strftime(\"%d\"))\n\n\n year5 = int(obj5.strftime(\"%Y\"))\n # print(year)\n # print(type(year),\"fgt\")\n month5 = int(obj5.strftime(\"%m\"))\n day5 = int(obj5.strftime(\"%d\"))\n\n year6 = int(obj6.strftime(\"%Y\"))\n # print(year)\n # print(type(year),\"fgt\")\n month6 = int(obj6.strftime(\"%m\"))\n day6 = int(obj6.strftime(\"%d\"))\n\n \n order_db11 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year1,month1,day1, 9 ,0,0) ).count() \n order_db12 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year1,month1,day1, 10,0,0) ).count() \n order_db13 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year1,month1,day1, 11 ,0,0) ).count() \n order_db14 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year1,month1,day1, 12 ,0,0) ).count() \n\n order_db21 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year2,month2,day2, 9 ,0,0) ).count() \n order_db22 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year2,month2,day2, 10,0,0) ).count() \n order_db23 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year2,month2,day2, 11 ,0,0) ).count() \n order_db24 = Order.objects.filter(docname=user_name).filter(datetime_start= 
datetime.datetime(year2,month2,day2, 12 ,0,0) ).count()\n \n order_db31 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year3,month3,day3, 9 ,0,0) ).count() \n order_db32 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year3,month3,day3, 10,0,0) ).count() \n order_db33 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year3,month3,day3, 11 ,0,0) ).count() \n order_db34 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year3,month3,day3, 12 ,0,0) ).count()\n \n order_db41 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year4,month4,day4, 9 ,0,0) ).count() \n order_db42 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year4,month4,day4, 10,0,0) ).count() \n order_db43 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year4,month4,day4, 11 ,0,0) ).count() \n order_db44 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year4,month4,day4, 12 ,0,0) ).count()\n \n order_db51 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year5,month5,day5, 9 ,0,0) ).count() \n order_db52 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year5,month5,day5, 10,0,0) ).count() \n order_db53 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year5,month5,day5, 11 ,0,0) ).count() \n order_db54 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year5,month5,day5, 12 ,0,0) ).count()\n \n order_db61 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year6,month6,day6, 9 ,0,0) ).count() \n order_db62 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year6,month6,day6, 10,0,0) ).count() \n order_db63 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year6,month6,day6, 11 ,0,0) ).count() \n order_db64 = Order.objects.filter(docname=user_name).filter(datetime_start= datetime.datetime(year6,month6,day6, 12 ,0,0) ).count()\n \n \n context['day1']=str(day1)+'/'+str(month1)+'/'+str(year1)\n context['day2']=str(day2)+'/'+str(month2)+'/'+str(year2)\n context['day3']=str(day3)+'/'+str(month3)+'/'+str(year3)\n context['day4']=str(day4)+'/'+str(month4)+'/'+str(year4)\n context['day5']=str(day5)+'/'+str(month5)+'/'+str(year5)\n context['day6']=str(day6)+'/'+str(month6)+'/'+str(year6)\n\n\n\n\n context['obj11']=5-order_db11\n context['obj12']=5-order_db12\n context['obj13']=5-order_db13\n context['obj14']=5-order_db14\n \n\n context['obj21']=5-order_db21\n context['obj22']=5-order_db22\n context['obj23']=5-order_db23\n context['obj24']=5-order_db24\n\n context['obj31']=5-order_db31\n context['obj32']=5-order_db32\n context['obj33']=5-order_db33\n context['obj34']=5-order_db34\n \n context['obj41']=5-order_db41\n context['obj42']=5-order_db42\n context['obj43']=5-order_db43\n context['obj44']=5-order_db44\n\n context['obj51']=5-order_db51\n context['obj52']=5-order_db52\n context['obj53']=5-order_db53\n context['obj54']=5-order_db54\n\n context['obj61']=5-order_db61\n context['obj62']=5-order_db62\n context['obj63']=5-order_db63\n context['obj64']=5-order_db64\n a=b=c=d=0\n\n price_obj=Doctor.objects.get(username=user_name)\n if '09:00-10:00' in price_obj.timeslots.values_list('name', flat=True):\n a=1\n if '10:00-11:00' in 
price_obj.timeslots.values_list('name', flat=True):\n b=1\n\n if '11:00-12:00' in price_obj.timeslots.values_list('name', flat=True):\n c=1\n\n if '12:00-13:00' in price_obj.timeslots.values_list('name', flat=True):\n d=1 \n\n print(price_obj.timeslots.values_list('name', flat=True))\n \n if obj1.isoweekday()==7:\n context['obj11']= context['obj12']= context['obj13']= context['obj14']=0\n\n if obj2.isoweekday()==7:\n context['obj21']= context['obj22']= context['obj23']= context['obj24']=0\n\n if obj3.isoweekday()==7:\n context['obj31']= context['obj32']= context['obj33']= context['obj34']=0\n\n if obj4.isoweekday()==7:\n context['obj41']= context['obj42']= context['obj43']= context['obj44']=0\n\n if obj5.isoweekday()==7:\n context['obj51']= context['obj52']= context['obj53']= context['obj54']=0\n\n if obj6.isoweekday()==7:\n context['obj61']= context['obj62']= context['obj63']= context['obj64']=0 \n\n context['a']=a\n context['b']=b\n context['c']=c\n context['d']=d\n context['doctor']=doctor5\n \n return render(request,\"appointment.html\",context)\n\n\n\n@login_required\ndef doctor(request):\n if request.method=='POST':\n form=DoctorForm(request.POST, request.FILES)\n if form.is_valid():\n \n \n if Doctor.objects.filter(username = request.user.username).first():\n messages.success(request, 'You have already registered as a doctor')\n return render(request,\"fail.html\")\n \n post = form.save(commit=False)\n post.username = request.user.username\n post.save()\n \n \n \n selected_categories = form.cleaned_data.get('timeslots')\n for title in selected_categories:\n category_obj = Property.objects.get(name=title) #get object by title i.e I declared unique for title under Category model\n post.timeslots.add(category_obj) #now add each category object to the saved form object\n #users=Doctor.objects.all()\n context={}\n return redirect('/')\n \n else:\n return render(request,\"fail.html\")\n else:\n context={}\n context['form']=DoctorForm()\n return render(request,\"doctor.html\",context)\n\n@login_required \ndef display(request):\n context={}\n context['appointment']=Appointment.objects.all()\n return render(request,\"display.html\",context)\n\n\nclass DoctorListView(LoginRequiredMixin,generic.ListView):\n model = Doctor\n template_name = 'doctorList.html' \n \n \n\nclass ProfileView( LoginRequiredMixin,View):\n def get(self, request, user_name):\n user_obj = Doctor.objects.get(username=user_name)\n print(user_obj.timeslots.values_list('name', flat=True))\n \n param = {'user_data':user_obj}\n return render(request, 'doctorprofile.html', param)\n\n\nclass ProfileViewPatient( LoginRequiredMixin,View):\n\n def get(self, request):\n user_obj = Patient.objects.get(username=request.user.username)\n \n \n param = {'user_data':user_obj}\n return render(request, 'patientprofile.html', param)\n\n\nclass DeleteCartView( LoginRequiredMixin,ListView):\n model = Cart\n def get(self, request, user_name):\n delete_post = self.model.objects.get(pk=user_name)\n \n delete_post.delete()\n \n\n\n\n\n\nclass DeleteView( LoginRequiredMixin,ListView):\n model = Doctor\n def get(self, request, user_name):\n delete_post = self.model.objects.get(pk=user_name)\n user2=delete_post.username\n print(delete_post.username)\n delete_post.delete()\n messages.success(request, 'Your post has been deleted successfully.')\n return redirect('/')\n\n\n@login_required\ndef doctor_detail_view(request, primary_key):\n doctor = get_object_or_404(Doctor, pk=primary_key)\n return render(request, 'doctor_detail.html', context={'doctor': 
doctor})\n\n\ndef UploadViewPatient(request):\n if request.method=='POST':\n form=uploadForm(request.POST, request.FILES)\n if form.is_valid():\n \n post = form.save(commit=False)\n post.doctor = Doctor.objects.get(username=request.user.username)\n post.save()\n patient_obj= Patient(username = post.patientUsername)\n patient_obj.save()\n patient_obj.prescription.add(post)\n \n return redirect('/')\n else:\n context={}\n context['form']=uploadForm(request.POST)\n return render(request,\"fail.html\",context)\n else:\n context={}\n context['form']=uploadForm()\n return render(request,\"upload_file.html\",context) \n\n@login_required \ndef doctor_detail_view(request, user_name):\n try:\n doctor = Doctor.objects.get(pk=user_name)\n except Doctor.DoesNotExist:\n return HttpResponse('Doctor does not exist', status=404)\n\n return render(request, 'doctor_detail.html', context={'doctor': doctor})\n\n\n\n\n\n\n\n@login_required \ndef indexpay(request):\n currency = 'INR'\n \n \n \n cart = Cart.objects.get(username = request.user.username)\n \n amount=cart.price*100\n razorpay_order = razorpay_client.order.create(dict(amount=amount,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcurrency=currency,\n\t\t\t\t\t\t\t\t\t\t\t\t\tpayment_capture='0'))\n\n\t# order id of newly created order.\n razorpay_order_id = razorpay_order['id']\n \n order = Order.objects.create(user = request.user, total_amount = cart.price,razorpay_order_id = razorpay_order['id'])\n\n \n \n callback_url = '/doctor/paymenthandler/'\n \n\n\t# we need to pass these details to frontend.\n context = {}\n context['razorpay_order_id'] = razorpay_order_id\n context['razorpay_merchant_key'] = settings.RAZOR_KEY_ID\n context['razorpay_amount'] = amount\n context['currency'] = currency\n context['callback_url'] = callback_url\n return render(request, 'indexpay.html', context=context)\n\n\n# we need to csrf_exempt this url as\n# POST request will be made by Razorpay\n# and it won't have the csrf token.\n\n# for generating pdf invoice\n\n\n\ndef fetch_resources(uri, rel):\n path = os.path.join(uri.replace(settings.STATIC_URL, \"\"))\n return path\n\ndef render_to_pdf(template_src, context_dict={}):\n template = get_template(template_src)\n html = template.render(context_dict)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)#, link_callback=fetch_resources)\n if not pdf.err:\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return None\n\n\n\n\n@csrf_exempt\ndef paymenthandler(request):\n \n # only accept POST request.\n if request.method == \"POST\":\n try:\n \n # get the required parameters from post request.\n payment_id = request.POST.get('razorpay_payment_id', '')\n razorpay_order_id = request.POST.get('razorpay_order_id', '')\n signature = request.POST.get('razorpay_signature', '')\n params_dict = {\n 'razorpay_order_id': razorpay_order_id,\n 'razorpay_payment_id': payment_id,\n 'razorpay_signature': signature\n }\n order_db = Order.objects.get(razorpay_order_id= razorpay_order_id)\n # verify the payment signature.\n order_db.razorpay_payment_id = payment_id\n order_db.razorpay_signature = signature\n order_db.save()\n result = razorpay_client.utility.verify_payment_signature(\n params_dict)\n if result is None:\n cart = Cart.objects.get(username = request.user.username)\n \n amount=cart.price*100\n \n try:\n order_db.payment_status = 1\n order_db.save()\n # capture the payment\n razorpay_client.payment.capture(payment_id, amount)\n template = get_template('invoice.html')\n patient_obj= Patient.objects.get(username= 
order_db.user.username)\n name= patient_obj.name\n doctor = Cart.objects.get(username=request.user.username).docname\n docprice = Cart.objects.get(username=request.user.username).price\n data = {\n 'order_id': order_db.order_id,\n 'transaction_id': order_db.razorpay_payment_id,\n 'user_email': order_db.user.email,\n 'date': str(order_db.datetime_of_payment),\n 'name': name,\n 'doctor':doctor,\n 'price':docprice,\n 'amount': order_db.total_amount,\n }\n html = template.render(data)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)#, link_callback=fetch_resources)\n pdf = result.getvalue()\n filename = 'Invoice_' + data['order_id'] + '.pdf'\n\n mail_subject = 'Recent Order Details'\n # message = render_to_string('firstapp/payment/emailinvoice.html', {\n # 'user': order_db.user,\n # 'order': order_db\n # })\n\n context_dict = {\n 'user': order_db.user,\n 'name':name,\n 'order': order_db\n }\n\n template = get_template('emailinvoice.html')\n message = template.render(context_dict)\n to_email = order_db.user.email\n email = EmailMultiAlternatives(\n mail_subject,\n \"hello\", # necessary to pass some message here\n settings.EMAIL_HOST_USER,\n [to_email]\n )\n email.attach_alternative(message, \"text/html\")\n email.attach(filename, pdf, 'application/pdf')\n email.send(fail_silently=False)\n\n c = Cart.objects.get(username=request.user.username)\n c.order_id = order_db.order_id\n c.save()\n order_db.docname = c.docname\n order_db.datetime_start =c.datetime_start\n order_db.datetime_end = c.datetime_end\n order_db.save()\n\n cart = Cart.objects.get(username = request.user.username)\n \n cart.delete()\n \n # render success page on successful caputre of payment\n return render(request,'paymentsuccess.html')\n except:\n order_db.payment_status = 2\n order_db.save()\n # if there is an error while capturing payment.\n return render(request, 'paymentfail.html')\n else:\n order_db.payment_status = 2\n order_db.save()\n # if signature verification fails.\n return render(request, 'paymentfail.html')\n except:\n \n # if we don't find the required parameters in POST data\n return HttpResponseBadRequest()\n else:\n # if other than POST request is made.\n return HttpResponseBadRequest()\n\n\n\n\n\nclass GenerateInvoice(View):\n def get(self, request, pk, *args, **kwargs):\n try:\n order_db = Order.objects.get(id = pk, user = request.user, payment_status = 1) #you can filter using order_id as well\n except:\n return HttpResponse(\"505 Not Found\")\n data = {\n 'order_id': order_db.order_id,\n 'transaction_id': order_db.razorpay_payment_id,\n 'user_email': order_db.user.email,\n 'date': str(order_db.datetime_of_payment),\n 'name': order_db.user.name,\n 'order': order_db,\n 'amount': order_db.total_amount,\n }\n pdf = render_to_pdf('invoice.html', data)\n #return HttpResponse(pdf, content_type='application/pdf')\n\n # force download\n if pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n filename = \"Invoice_%s.pdf\" %(data['order_id'])\n content = \"inline; filename='%s'\" %(filename)\n #download = request.GET.get(\"download\")\n #if download:\n content = \"attachment; filename=%s\" %(filename)\n response['Content-Disposition'] = content\n return response\n return HttpResponse(\"Not found\")\n\n", "sub_path": "doctorconsult/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 29296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "razorpay.Client", 
"line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.settings.RAZOR_KEY_ID", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.settings.RAZOR_KEY_SECRET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Order.objects.all", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Doctor.objects.get", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Doctor.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Doctor", "line_number": 47, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 138, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 140, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Doctor", "line_number": 164, "usage_type": "argument"}, {"api_name": "models.Patient.objects.filter", "line_number": 174, "usage_type": "call"}, {"api_name": "models.Patient.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "models.Patient", "line_number": 174, "usage_type": "name"}, {"api_name": "models.Patient.objects.create", "line_number": 175, "usage_type": "call"}, {"api_name": "models.Patient.objects", "line_number": 175, "usage_type": "attribute"}, {"api_name": "models.Patient", "line_number": 175, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 183, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 186, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 189, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 192, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 195, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 195, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 195, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 201, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 201, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 201, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 204, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 208, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 211, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 211, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 211, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 214, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 217, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 217, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 217, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 221, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 224, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 227, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 227, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 227, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 230, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 230, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 230, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 234, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 234, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 234, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 237, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 240, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 240, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 240, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 243, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 243, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 243, "usage_type": "call"}, {"api_name": "models.Appointment", "line_number": 247, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 262, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 263, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 265, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 266, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 269, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 271, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 272, "usage_type": "call"}, {"api_name": "models.Doctor.objects.get", "line_number": 278, "usage_type": "call"}, {"api_name": "models.Doctor.objects", "line_number": 278, "usage_type": "attribute"}, {"api_name": "models.Doctor", "line_number": 278, "usage_type": "name"}, {"api_name": "models.Cart.objects.filter", "line_number": 280, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 280, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 280, "usage_type": "name"}, {"api_name": "models.Cart.objects.filter", "line_number": 281, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 281, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 281, "usage_type": "name"}, {"api_name": "models.Cart.objects.create", "line_number": 283, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 283, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 283, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 285, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 285, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 287, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 289, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 297, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 297, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 298, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 298, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 298, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 299, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 299, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 299, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 300, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 300, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 300, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 301, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 301, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 301, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 302, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 302, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 302, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 346, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 346, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 346, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 346, "usage_type": "call"}, {"api_name": 
"models.Order.objects.filter", "line_number": 347, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 347, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 347, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 347, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 348, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 348, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 348, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 348, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 349, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 349, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 349, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 349, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 351, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 351, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 351, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 351, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 352, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 352, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 352, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 352, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 353, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 353, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 353, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 353, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 354, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 354, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 354, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 354, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 356, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 356, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 356, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 356, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 357, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 357, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 357, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 357, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 358, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 358, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 358, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 358, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 359, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 359, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 359, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 359, "usage_type": "call"}, {"api_name": 
"models.Order.objects.filter", "line_number": 361, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 361, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 361, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 361, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 362, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 362, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 362, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 362, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 363, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 363, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 363, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 363, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 364, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 364, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 364, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 364, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 366, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 366, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 366, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 366, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 367, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 367, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 367, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 367, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 368, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 368, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 368, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 368, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 369, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 369, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 369, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 369, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 371, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 371, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 371, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 371, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 372, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 372, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 372, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 372, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 373, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 373, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 373, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 373, "usage_type": "call"}, {"api_name": 
"models.Order.objects.filter", "line_number": 374, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 374, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 374, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 374, "usage_type": "call"}, {"api_name": "models.Doctor.objects.get", "line_number": 419, "usage_type": "call"}, {"api_name": "models.Doctor.objects", "line_number": 419, "usage_type": "attribute"}, {"api_name": "models.Doctor", "line_number": 419, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 457, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 162, "usage_type": "name"}, {"api_name": "forms.DoctorForm", "line_number": 464, "usage_type": "call"}, {"api_name": "models.Doctor.objects.filter", "line_number": 467, "usage_type": "call"}, {"api_name": "models.Doctor.objects", "line_number": 467, "usage_type": "attribute"}, {"api_name": "models.Doctor", "line_number": 467, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 468, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 468, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 469, "usage_type": "call"}, {"api_name": "models.Property.objects.get", "line_number": 479, "usage_type": "call"}, {"api_name": "models.Property.objects", "line_number": 479, "usage_type": "attribute"}, {"api_name": "models.Property", "line_number": 479, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 483, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 486, "usage_type": "call"}, {"api_name": "forms.DoctorForm", "line_number": 489, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 490, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 461, "usage_type": "name"}, {"api_name": "models.Appointment.objects.all", "line_number": 495, "usage_type": "call"}, {"api_name": "models.Appointment.objects", "line_number": 495, "usage_type": "attribute"}, {"api_name": "models.Appointment", "line_number": 495, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 496, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 492, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 499, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 499, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 499, "usage_type": "name"}, {"api_name": "models.Doctor", "line_number": 500, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 505, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 505, "usage_type": "name"}, {"api_name": "models.Doctor.objects.get", "line_number": 507, "usage_type": "call"}, {"api_name": "models.Doctor.objects", "line_number": 507, "usage_type": "attribute"}, {"api_name": "models.Doctor", "line_number": 507, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 511, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 514, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 514, "usage_type": "name"}, {"api_name": "models.Patient.objects.get", 
"line_number": 517, "usage_type": "call"}, {"api_name": "models.Patient.objects", "line_number": 517, "usage_type": "attribute"}, {"api_name": "models.Patient", "line_number": 517, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 521, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 524, "usage_type": "name"}, {"api_name": "django.views.generic.list.ListView", "line_number": 524, "usage_type": "name"}, {"api_name": "models.Cart", "line_number": 525, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 536, "usage_type": "name"}, {"api_name": "django.views.generic.list.ListView", "line_number": 536, "usage_type": "name"}, {"api_name": "models.Doctor", "line_number": 537, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 543, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 543, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 544, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 549, "usage_type": "call"}, {"api_name": "models.Doctor", "line_number": 549, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 550, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 547, "usage_type": "name"}, {"api_name": "forms.uploadForm", "line_number": 555, "usage_type": "call"}, {"api_name": "models.Doctor", "line_number": 559, "usage_type": "call"}, {"api_name": "models.Patient", "line_number": 561, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 565, "usage_type": "call"}, {"api_name": "forms.uploadForm", "line_number": 568, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 569, "usage_type": "call"}, {"api_name": "forms.uploadForm", "line_number": 572, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 573, "usage_type": "call"}, {"api_name": "models.Doctor.objects.get", "line_number": 578, "usage_type": "call"}, {"api_name": "models.Doctor.objects", "line_number": 578, "usage_type": "attribute"}, {"api_name": "models.Doctor", "line_number": 578, "usage_type": "name"}, {"api_name": "models.Doctor.DoesNotExist", "line_number": 579, "usage_type": "attribute"}, {"api_name": "models.Doctor", "line_number": 579, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 582, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 575, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 596, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 596, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 596, "usage_type": "name"}, {"api_name": "models.Order.objects.create", "line_number": 606, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 606, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 606, "usage_type": "name"}, {"api_name": "django.conf.settings.RAZOR_KEY_ID", "line_number": 616, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 616, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 620, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 590, "usage_type": "name"}, {"api_name": "os.path.join", 
"line_number": 632, "usage_type": "call"}, {"api_name": "os.path", "line_number": 632, "usage_type": "attribute"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 632, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 632, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 636, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 638, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa.pisaDocument", "line_number": 639, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa", "line_number": 639, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 639, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 641, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 663, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 663, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 663, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 671, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 671, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 671, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 680, "usage_type": "call"}, {"api_name": "models.Patient.objects.get", "line_number": 681, "usage_type": "call"}, {"api_name": "models.Patient.objects", "line_number": 681, "usage_type": "attribute"}, {"api_name": "models.Patient", "line_number": 681, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 683, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 683, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 683, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 684, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 684, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 684, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 696, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa.pisaDocument", "line_number": 697, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa", "line_number": 697, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 697, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 713, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 716, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 719, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 719, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 726, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 726, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 726, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 734, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 734, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 734, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 739, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 744, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 749, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 753, 
"usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 756, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 647, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 762, "usage_type": "name"}, {"api_name": "models.Order.objects.get", "line_number": 765, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 765, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 765, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 767, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 782, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 790, "usage_type": "call"}]} +{"seq_id": "600067573", "text": "import json\nimport logging\nimport math\n\nfrom django.contrib import messages\nfrom django.db.models import F, Q\nfrom django.http import JsonResponse\nfrom django.utils.translation import gettext as _, gettext_lazy, ngettext\nfrom django.views.generic.base import TemplateView, View\nfrom django.views.generic.edit import FormView\n\nfrom actionlog.mixins import LogActionMixin\nfrom actionlog.models import ActionLogEntry\nfrom options.utils import use_team_code_names, use_team_code_names_data_entry\nfrom participants.models import Adjudicator, Team\nfrom participants.prefetch import populate_feedback_scores\nfrom participants.templatetags.team_name_for_data_entry import team_name_for_data_entry\nfrom results.mixins import PublicSubmissionFieldsMixin, TabroomSubmissionFieldsMixin\nfrom results.prefetch import populate_wins_for_debateteams\nfrom tournaments.mixins import (PublicTournamentPageMixin, SingleObjectByRandomisedUrlMixin,\n SingleObjectFromTournamentMixin, TournamentMixin)\nfrom tournaments.models import Round\n\nfrom utils.misc import reverse_tournament\nfrom utils.mixins import AdministratorMixin, AssistantMixin\nfrom utils.views import PostOnlyRedirectView, VueTableTemplateView\nfrom utils.tables import TabbycatTableBuilder\n\nfrom .models import AdjudicatorFeedback, AdjudicatorTestScoreHistory\nfrom .forms import make_feedback_form_class, UpdateAdjudicatorScoresForm\nfrom .tables import FeedbackTableBuilder\nfrom .utils import get_feedback_overview\nfrom .prefetch import populate_debate_adjudicators\nfrom .progress import get_feedback_progress\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseFeedbackOverview(TournamentMixin, VueTableTemplateView):\n \"\"\" Also inherited by the adjudicator's tab \"\"\"\n\n def get_adjudicators(self):\n if not hasattr(self, '_adjudicators'):\n t = self.tournament\n if t.pref('share_adjs'):\n self._adjudicators = Adjudicator.objects.filter(Q(tournament=t) | Q(tournament__isnull=True))\n else:\n self._adjudicators = Adjudicator.objects.filter(tournament=t)\n populate_feedback_scores(self._adjudicators)\n return self._adjudicators\n\n def get_context_data(self, **kwargs):\n t = self.tournament\n adjudicators = self.get_adjudicators()\n weight = t.current_round.feedback_weight\n scores = [a.weighted_score(weight) for a in adjudicators]\n\n kwargs['c_breaking'] = adjudicators.filter(breaking=True).count()\n\n ntotal = len(scores)\n ntrainees = [x < t.pref('adj_min_voting_score') for x in scores].count(True)\n nvoting = ntotal - ntrainees\n ndebates = t.team_set.count() // (4 if t.pref('teams_in_debate') == 'bp' else 2)\n nchairs = min(nvoting, ndebates)\n npanellists = nvoting - nchairs\n\n max_score = 
int(math.ceil(t.pref('adj_max_score')))\n min_score = int(math.floor(t.pref('adj_min_score')))\n range_width = max_score - min_score\n band_widths = [range_width // 5] * 5\n for i in range(range_width - sum(band_widths)):\n band_widths[i] += 1\n band_widths = [x for x in band_widths if x > 0]\n bands = []\n threshold = max_score\n for width in band_widths:\n bands.append((threshold - width, threshold))\n threshold = threshold - width\n if not threshold == min_score:\n logger.error(\"Feedback bands calculation didn't work\")\n\n band_specs = []\n threshold_classes = ['80', '70', '60', '50', '40'] # CSS suffix\n for (band_min, band_max), threshold_class in zip(bands, threshold_classes):\n band_specs.append({\n 'min': band_min, 'max': band_max, 'class': threshold_class,\n 'count': [x >= band_min and x < band_max for x in scores].count(True)\n })\n band_specs[0]['count'] += [x == max_score for x in scores].count(True)\n\n noutside_range = [x < min_score or x > max_score for x in scores].count(True)\n\n kwargs.update({\n 'c_total': ntotal,\n 'c_chairs': nchairs,\n 'c_debates': ndebates,\n 'c_panellists': npanellists,\n 'c_trainees': ntrainees,\n 'c_thresholds': band_specs,\n 'nadjs_outside_range': noutside_range,\n 'test_percent': (1.0 - weight) * 100,\n 'feedback_percent': weight * 100,\n })\n\n return super().get_context_data(**kwargs)\n\n def get_table(self):\n t = self.tournament\n adjudicators = self.get_adjudicators()\n # Gather stats necessary to construct the graphs\n adjudicators = get_feedback_overview(t, adjudicators)\n table = FeedbackTableBuilder(view=self, sort_key=self.sort_key,\n sort_order=self.sort_order)\n table = self.annotate_table(table, adjudicators)\n return table\n\n\nclass FeedbackOverview(AdministratorMixin, BaseFeedbackOverview):\n\n page_title = gettext_lazy(\"Feedback Overview\")\n page_emoji = '🙅'\n for_public = False\n sort_key = 'score'\n sort_order = 'desc'\n template_name = 'feedback_overview.html'\n\n def annotate_table(self, table, adjudicators):\n feedback_weight = self.tournament.current_round.feedback_weight\n scores = {adj: adj.weighted_score(feedback_weight) for adj in adjudicators}\n\n table.add_adjudicator_columns(adjudicators, show_institutions=False, subtext='institution')\n table.add_breaking_checkbox(adjudicators)\n table.add_weighted_score_columns(adjudicators, scores)\n table.add_test_score_columns(adjudicators, editable=True)\n table.add_score_difference_columns(adjudicators, scores)\n table.add_score_variance_columns(adjudicators)\n table.add_feedback_graphs(adjudicators)\n table.add_feedback_link_columns(adjudicators)\n if self.tournament.pref('enable_adj_notes'):\n table.add_feedback_note_columns(adjudicators)\n return table\n\n\nclass FeedbackByTargetView(AdministratorMixin, TournamentMixin, VueTableTemplateView):\n template_name = \"feedback_base.html\"\n page_title = gettext_lazy(\"Find Feedback on Adjudicator\")\n page_emoji = '🔍'\n\n def get_table(self):\n tournament = self.tournament\n table = TabbycatTableBuilder(view=self, sort_key=\"name\")\n table.add_adjudicator_columns(tournament.adjudicator_set.all())\n feedback_data = []\n for adj in tournament.adjudicator_set.all():\n count = adj.adjudicatorfeedback_set.count()\n feedback_data.append({\n 'text': ngettext(\"%(count)d feedback\", \"%(count)d feedbacks\", count) % {'count': count},\n 'link': reverse_tournament('adjfeedback-view-on-adjudicator', tournament, kwargs={'pk': adj.id}),\n })\n table.add_column({'key': 'feedbacks', 'title': _(\"Feedbacks\")}, feedback_data)\n return 
table\n\n\nclass FeedbackBySourceView(AdministratorMixin, TournamentMixin, VueTableTemplateView):\n\n template_name = \"feedback_base.html\"\n page_title = gettext_lazy(\"Find Feedback\")\n page_emoji = '🔍'\n\n def get_tables(self):\n tournament = self.tournament\n\n teams = tournament.team_set.all()\n team_table = TabbycatTableBuilder(\n view=self, title='From Teams', sort_key='team')\n team_table.add_team_columns(teams)\n team_feedback_data = []\n for team in teams:\n count = AdjudicatorFeedback.objects.filter(\n source_team__team=team).select_related(\n 'source_team__team').count()\n team_feedback_data.append({\n 'text': ngettext(\"%(count)d feedback\", \"%(count)d feedbacks\", count) % {'count': count},\n 'link': reverse_tournament('adjfeedback-view-from-team',\n tournament,\n kwargs={'pk': team.id}),\n })\n team_table.add_column({'key': 'feedbacks', 'title': _(\"Feedbacks\")}, team_feedback_data)\n\n adjs = tournament.adjudicator_set.all()\n adj_table = TabbycatTableBuilder(\n view=self, title='From Adjudicators', sort_key='name')\n adj_table.add_adjudicator_columns(adjs)\n adj_feedback_data = []\n for adj in adjs:\n count = AdjudicatorFeedback.objects.filter(\n source_adjudicator__adjudicator=adj).select_related(\n 'source_adjudicator__adjudicator').count()\n adj_feedback_data.append({\n 'text': ngettext(\"%(count)d feedback\", \"%(count)d feedbacks\", count) % {'count': count},\n 'link': reverse_tournament('adjfeedback-view-from-adjudicator',\n tournament,\n kwargs={'pk': adj.id}),\n })\n adj_table.add_column({'key': 'feedbacks', 'title': _(\"Feedbacks\")}, adj_feedback_data)\n\n return [team_table, adj_table]\n\n\nclass FeedbackMixin(TournamentMixin):\n\n def get_feedbacks(self):\n feedbacks = self.get_feedback_queryset()\n\n populate_debate_adjudicators(feedbacks)\n populate_wins_for_debateteams([f.source_team for f in feedbacks if f.source_team is not None])\n\n # Can't prefetch an abstract model effectively; so get all answers...\n questions = list(self.tournament.adj_feedback_questions)\n for question in questions:\n question.answers = list(question.answer_set.values())\n\n for feedback in feedbacks:\n feedback.items = []\n # ...and stitch them together manually\n for question in questions:\n for answer in question.answers:\n if answer['feedback_id'] == feedback.id:\n feedback.items.append({'question': question,\n 'answer': answer['answer']})\n break # Should only be one match\n\n return feedbacks\n\n def get_feedback_queryset(self):\n return AdjudicatorFeedback.objects.filter(\n Q(adjudicator__tournament=self.tournament) |\n Q(adjudicator__tournament__isnull=True)\n ).select_related(\n 'adjudicator',\n 'source_adjudicator__adjudicator',\n 'source_adjudicator__debate__round',\n 'source_team__debate__round',\n 'source_team__team',\n )\n\n\nclass FeedbackCardsView(FeedbackMixin, AdministratorMixin, TournamentMixin, TemplateView):\n \"\"\"Base class for views displaying feedback as cards.\"\"\"\n template_name = \"feedback_cards_list.html\"\n\n def get_score_thresholds(self):\n tournament = self.tournament\n min_score = tournament.pref('adj_min_score')\n max_score = tournament.pref('adj_max_score')\n score_range = max_score - min_score\n return {\n 'low_score' : min_score + score_range / 10,\n 'medium_score' : min_score + score_range / 5,\n 'high_score' : max_score - score_range / 10,\n }\n\n def get_context_data(self, **kwargs):\n kwargs['feedbacks'] = self.get_feedbacks()\n kwargs['score_thresholds'] = self.get_score_thresholds()\n return 
super().get_context_data(**kwargs)\n\n\nclass LatestFeedbackView(FeedbackCardsView):\n \"\"\"View displaying the latest feedback.\"\"\"\n page_title = gettext_lazy(\"Latest Feedback\")\n page_subtitle = gettext_lazy(\"(30 most recent)\")\n page_emoji = '🕗 '\n\n def get_feedback_queryset(self):\n queryset = super().get_feedback_queryset()\n return queryset.order_by('-timestamp')[:30]\n\n\nclass ImportantFeedbackView(FeedbackCardsView):\n \"\"\"View displaying the feedback in order of most 'important'.\"\"\"\n page_title = gettext_lazy(\"Important Feedback\")\n page_subtitle = gettext_lazy(\"(rating was much higher/lower than expected)\")\n page_emoji = '⁉️'\n\n def get_feedback_queryset(self):\n queryset = super().get_feedback_queryset()\n return queryset.annotate(\n feedback_importance=F('score') - F('adjudicator__test_score')\n ).filter(\n Q(feedback_importance__gt=2) | Q(feedback_importance__lt=-2),\n ).order_by('-timestamp')\n\n\nclass FeedbackFromSourceView(SingleObjectFromTournamentMixin, FeedbackCardsView):\n \"\"\"Base class for views displaying feedback from a given team or adjudicator.\"\"\"\n\n template_name = \"feedback_by_source.html\"\n source_name_attr = None\n source_type = \"from\"\n adjfeedback_filter_field = None\n\n def get_context_data(self, **kwargs):\n kwargs['source_name'] = getattr(self.object, self.source_name_attr, '')\n kwargs['source_type'] = self.source_type\n return super().get_context_data(**kwargs)\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().get(request, *args, **kwargs)\n\n def get_feedback_queryset(self):\n queryset = super().get_feedback_queryset()\n kwargs = {self.adjfeedback_filter_field: self.object}\n return queryset.filter(**kwargs).order_by('-timestamp')\n\n\nclass FeedbackOnAdjudicatorView(FeedbackFromSourceView):\n \"\"\"Base class for views displaying feedback from a given team or adjudicator.\"\"\"\n\n model = Adjudicator\n source_name_attr = 'name'\n source_type = \"on\"\n adjfeedback_filter_field = 'adjudicator'\n allow_null_tournament = True\n\n\nclass FeedbackFromTeamView(FeedbackFromSourceView):\n \"\"\"View displaying feedback from a given source.\"\"\"\n model = Team\n source_name_attr = 'short_name'\n adjfeedback_filter_field = 'source_team__team'\n allow_null_tournament = False\n\n\nclass FeedbackFromAdjudicatorView(FeedbackFromSourceView):\n \"\"\"View displaying feedback from a given adjudicator.\"\"\"\n model = Adjudicator\n source_name_attr = 'name'\n adjfeedback_filter_field = 'source_adjudicator__adjudicator'\n allow_null_tournament = True\n\n\nclass BaseAddFeedbackIndexView(TournamentMixin, VueTableTemplateView):\n\n def get_tables(self):\n tournament = self.tournament\n\n use_code_names = use_team_code_names_data_entry(self.tournament, self.tabroom)\n teams_table = TabbycatTableBuilder(view=self, sort_key=\"team\", title=_(\"A Team\"))\n add_link_data = [{\n 'text': team_name_for_data_entry(team, use_code_names),\n 'link': self.get_from_team_link(team)\n } for team in tournament.team_set.all()]\n header = {'key': 'team', 'title': _(\"Team\")}\n teams_table.add_column(header, add_link_data)\n\n if tournament.pref('show_team_institutions'):\n teams_table.add_column({\n 'key': 'institution',\n 'icon': 'home',\n 'tooltip': _(\"Institution\"),\n }, [team.institution.code if team.institution else TabbycatTableBuilder.BLANK_TEXT for team in tournament.team_set.all()])\n\n if tournament.pref('share_adjs'):\n adjudicators = Adjudicator.objects.filter(Q(tournament=tournament) | 
Q(tournament__isnull=True))\n else:\n adjudicators = tournament.adjudicator_set.all()\n\n adjs_table = TabbycatTableBuilder(view=self, sort_key=\"adjudicator\", title=_(\"An Adjudicator\"))\n if tournament.pref('share_adjs'):\n adjudicators = Adjudicator.objects.filter(Q(tournament=tournament) | Q(tournament__isnull=True))\n else:\n adjudicators = tournament.adjudicator_set.all()\n\n add_link_data = [{\n 'text': adj.name,\n 'link': self.get_from_adj_link(adj),\n } for adj in adjudicators]\n header = {'key': 'adjudicator', 'title': _(\"Adjudicator\")}\n adjs_table.add_column(header, add_link_data)\n\n if tournament.pref('show_adjudicator_institutions'):\n adjs_table.add_column({\n 'key': 'institution',\n 'icon': 'home',\n 'tooltip': _(\"Institution\"),\n }, [adj.institution.code if adj.institution else TabbycatTableBuilder.BLANK_TEXT for adj in adjudicators])\n\n return [teams_table, adjs_table]\n\n\nclass AdminAddFeedbackIndexView(AdministratorMixin, BaseAddFeedbackIndexView):\n \"\"\"View for the index page for administrators to add feedback. The index\n page lists all possible sources; officials should then choose the author\n of the feedback.\"\"\"\n template_name = 'add_feedback.html'\n tabroom = True\n\n def get_from_adj_link(self, adj):\n return reverse_tournament('adjfeedback-add-from-adjudicator',\n self.tournament, kwargs={'source_id': adj.id})\n\n def get_from_team_link(self, team):\n return reverse_tournament('adjfeedback-add-from-team',\n self.tournament, kwargs={'source_id': team.id})\n\n\nclass AssistantAddFeedbackIndexView(AssistantMixin, BaseAddFeedbackIndexView):\n \"\"\"As for AdminAddFeedbackIndexView, but for assistants.\"\"\"\n template_name = 'assistant_add_feedback.html'\n tabroom = True\n\n def get_from_adj_link(self, adj):\n return reverse_tournament('adjfeedback-assistant-add-from-adjudicator',\n self.tournament, kwargs={'source_id': adj.id})\n\n def get_from_team_link(self, team):\n return reverse_tournament('adjfeedback-assistant-add-from-team',\n self.tournament, kwargs={'source_id': team.id})\n\n\nclass PublicAddFeedbackIndexView(PublicTournamentPageMixin, BaseAddFeedbackIndexView):\n \"\"\"View for the index page for public users to add feedback. 
The index page\n lists all possible sources; public users should then choose themselves.\"\"\"\n\n template_name = 'public_add_feedback.html'\n tabroom = False\n\n def is_page_enabled(self, tournament):\n return tournament.pref('participant_feedback') == 'public'\n\n def get_from_adj_link(self, team):\n return reverse_tournament('adjfeedback-public-add-from-adjudicator-pk',\n self.tournament, kwargs={'source_id': team.id})\n\n def get_from_team_link(self, team):\n return reverse_tournament('adjfeedback-public-add-from-team-pk',\n self.tournament, kwargs={'source_id': team.id})\n\n\nclass BaseAddFeedbackView(LogActionMixin, SingleObjectFromTournamentMixin, FormView):\n \"\"\"Base class for views that allow users to add feedback.\"\"\"\n\n template_name = \"enter_feedback.html\"\n pk_url_kwarg = 'source_id'\n allow_null_tournament = True\n action_log_content_object_attr = 'adj_feedback'\n\n def get_form_class(self):\n return make_feedback_form_class(self.object, self.tournament,\n self.get_submitter_fields(), **self.feedback_form_class_kwargs)\n\n def form_valid(self, form):\n self.adj_feedback = form.save()\n self.round = self.adj_feedback.debate.round # for LogActionMixin\n return super().form_valid(form)\n\n def get_context_data(self, **kwargs):\n source = self.object\n if isinstance(source, Adjudicator):\n kwargs['source_type'] = \"adj\"\n elif isinstance(source, Team):\n kwargs['source_type'] = \"team\"\n kwargs['source_name'] = self.source_name\n return super().get_context_data(**kwargs)\n\n def _populate_source(self):\n self.object = self.get_object() # For compatibility with SingleObjectMixin\n if isinstance(self.object, Adjudicator):\n self.source_name = self.object.name\n elif isinstance(self.object, Team):\n self.source_name = self.get_team_short_name(self.object)\n else:\n logger.error(\"self.object was neither an Adjudicator nor a Team\")\n self.source_name = \"\"\n\n def get(self, request, *args, **kwargs):\n self._populate_source()\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n self._populate_source()\n return super().post(request, *args, **kwargs)\n\n\nclass BaseTabroomAddFeedbackView(TabroomSubmissionFieldsMixin, BaseAddFeedbackView):\n \"\"\"View for tabroom officials to add feedback.\"\"\"\n\n action_log_type = ActionLogEntry.ACTION_TYPE_FEEDBACK_SAVE\n feedback_form_class_kwargs = {\n 'confirm_on_submit': True,\n 'enforce_required': False,\n 'include_unreleased_draws': True,\n 'use_tournament_password': False,\n }\n\n def get_team_short_name(self, team):\n use_code_names = use_team_code_names_data_entry(self.tournament, tabroom=True)\n return team_name_for_data_entry(team, use_code_names)\n\n def form_valid(self, form):\n result = super().form_valid(form)\n messages.success(self.request, \"Feedback from {} on {} added.\".format(\n self.source_name, self.adj_feedback.adjudicator.name))\n return result\n\n def get_success_url(self):\n return reverse_tournament('adjfeedback-add-index', self.tournament)\n\n\nclass AdminAddFeedbackView(AdministratorMixin, BaseTabroomAddFeedbackView):\n pass\n\n\nclass AssistantAddFeedbackView(AssistantMixin, BaseTabroomAddFeedbackView):\n pass\n\n\nclass PublicAddFeedbackView(PublicSubmissionFieldsMixin, PublicTournamentPageMixin, BaseAddFeedbackView):\n \"\"\"Base class for views for public users to add feedback.\"\"\"\n\n action_log_type = ActionLogEntry.ACTION_TYPE_FEEDBACK_SUBMIT\n feedback_form_class_kwargs = {\n 'confirm_on_submit': True,\n 'enforce_required': True,\n 
'include_unreleased_draws': False,\n 'use_tournament_password': True,\n }\n\n def form_valid(self, form):\n result = super().form_valid(form)\n messages.success(self.request, \"Thanks, {}! Your feedback on {} has been recorded.\".format(\n self.source_name, self.adj_feedback.adjudicator.name))\n return result\n\n def get_context_data(self, **kwargs):\n kwargs['no_rounds_released'] = not self.tournament.round_set.filter(\n draw_status=Round.STATUS_RELEASED).exists()\n return super().get_context_data(**kwargs)\n\n\nclass PublicAddFeedbackByRandomisedUrlView(SingleObjectByRandomisedUrlMixin, PublicAddFeedbackView):\n \"\"\"View for public users to add feedback, where the URL is a randomised one.\"\"\"\n\n def get_team_short_name(self, team):\n # It's a private URL, so always show the team's real name.\n return team.short_name\n\n def is_page_enabled(self, tournament):\n return tournament.pref('participant_feedback') == 'private-urls'\n\n def get_success_url(self):\n # Redirect to non-cached page: their original private URL\n if isinstance(self.object, Adjudicator):\n return reverse_tournament('adjfeedback-public-add-from-adjudicator-randomised',\n self.tournament, kwargs={'url_key': self.object.url_key})\n elif isinstance(self.object, Team):\n return reverse_tournament('adjfeedback-public-add-from-team-randomised',\n self.tournament, kwargs={'url_key': self.object.url_key})\n else:\n raise ValueError(\"Private feedback source is not of a valid type\")\n\n\nclass PublicAddFeedbackByIdUrlView(PublicAddFeedbackView):\n \"\"\"View for public users to add feedback, where the URL is by object ID.\"\"\"\n\n tabroom = False\n\n def get_team_short_name(self, team):\n use_code_names = use_team_code_names(self.tournament, admin=False)\n return team.code_name if use_code_names else team.short_name\n\n def is_page_enabled(self, tournament):\n return tournament.pref('participant_feedback') == 'public'\n\n def get_success_url(self):\n # Redirect to non-cached page: the public feedback form\n if isinstance(self.object, Adjudicator):\n return reverse_tournament('adjfeedback-public-add-from-adjudicator-pk',\n self.tournament, kwargs={'source_id': self.object.id})\n elif isinstance(self.object, Team):\n return reverse_tournament('adjfeedback-public-add-from-team-pk',\n self.tournament, kwargs={'source_id': self.object.id})\n else:\n raise ValueError(\"Public feedback source is not of a valid type\")\n\n\nclass AdjudicatorActionError(RuntimeError):\n pass\n\n\nclass BaseAdjudicatorActionView(LogActionMixin, AdministratorMixin, TournamentMixin, PostOnlyRedirectView):\n\n tournament_redirect_pattern_name = 'adjfeedback-overview'\n action_log_content_object_attr = 'adjudicator'\n\n def get_adjudicator(self, request):\n try:\n adj_id = int(request.POST[\"adj_id\"])\n adjudicator = Adjudicator.objects.get(id=adj_id)\n except (ValueError, Adjudicator.DoesNotExist, Adjudicator.MultipleObjectsReturned):\n raise AdjudicatorActionError(\"Whoops! 
I didn't recognise that adjudicator: {}\".format(adj_id))\n return adjudicator\n\n def post(self, request, *args, **kwargs):\n try:\n self.adjudicator = self.get_adjudicator(request)\n self.modify_adjudicator(request, self.adjudicator)\n self.log_action() # Need to call explicitly, since this isn't a form view\n except AdjudicatorActionError as e:\n messages.error(request, str(e))\n\n return super().post(request, *args, **kwargs)\n\n\nclass SetAdjudicatorTestScoreView(BaseAdjudicatorActionView):\n\n action_log_type = ActionLogEntry.ACTION_TYPE_TEST_SCORE_EDIT\n action_log_content_object_attr = 'atsh'\n\n def modify_adjudicator(self, request, adjudicator):\n try:\n score = float(request.POST[\"test_score\"])\n except ValueError:\n raise AdjudicatorActionError(\"Whoops! The value isn't a valid test score.\")\n\n adjudicator.test_score = score\n adjudicator.save()\n\n atsh = AdjudicatorTestScoreHistory(\n adjudicator=adjudicator, round=self.tournament.current_round,\n score=score)\n atsh.save()\n self.atsh = atsh\n\n\nclass SetAdjudicatorBreakingStatusView(AdministratorMixin, TournamentMixin, LogActionMixin, View):\n\n action_log_type = ActionLogEntry.ACTION_TYPE_ADJUDICATOR_BREAK_SET\n\n def post(self, request, *args, **kwargs):\n body = self.request.body.decode('utf-8')\n posted_info = json.loads(body)\n adjudicator = Adjudicator.objects.get(id=posted_info['id'])\n adjudicator.breaking = posted_info['breaking']\n adjudicator.save()\n return JsonResponse(json.dumps(True), safe=False)\n\n\nclass SetAdjudicatorNoteView(BaseAdjudicatorActionView):\n\n action_log_type = ActionLogEntry.ACTION_TYPE_ADJUDICATOR_NOTE_SET\n\n def modify_adjudicator(self, request, adjudicator):\n try:\n note = str(request.POST[\"note\"])\n except ValueError as e:\n raise AdjudicatorActionError(\"Whoop! 
There was an error interpreting that string: \" + str(e))\n\n adjudicator.notes = note\n adjudicator.save()\n\n\nclass BaseFeedbackProgressView(TournamentMixin, VueTableTemplateView):\n\n page_title = gettext_lazy(\"Feedback Progress\")\n page_subtitle = ''\n page_emoji = '🆘'\n\n def get_feedback_progress(self):\n if not hasattr(self, \"_feedback_progress_result\"):\n self._feedback_progress_result = get_feedback_progress(self.tournament)\n return self._feedback_progress_result\n\n def get_page_subtitle(self):\n teams_progress, adjs_progress = self.get_feedback_progress()\n total_missing = sum([progress.num_unsubmitted() for progress in teams_progress + adjs_progress])\n return \"{:d} missing feedback submissions\".format(total_missing)\n\n def get_tables(self):\n teams_progress, adjs_progress = self.get_feedback_progress()\n\n adjs_table = FeedbackTableBuilder(view=self, title=\"From Adjudicators\",\n sort_key=\"owed\", sort_order=\"desc\")\n adjudicators = [progress.adjudicator for progress in adjs_progress]\n adjs_table.add_adjudicator_columns(adjudicators, show_metadata=False)\n adjs_table.add_feedback_progress_columns(adjs_progress)\n\n teams_table = FeedbackTableBuilder(view=self, title=\"From Teams\",\n sort_key=\"owed\", sort_order=\"desc\")\n teams = [progress.team for progress in teams_progress]\n teams_table.add_team_columns(teams)\n teams_table.add_feedback_progress_columns(teams_progress)\n\n return [adjs_table, teams_table]\n\n\nclass FeedbackProgress(AdministratorMixin, BaseFeedbackProgressView):\n template_name = 'feedback_base.html'\n\n\nclass PublicFeedbackProgress(PublicTournamentPageMixin, BaseFeedbackProgressView):\n public_page_preference = 'feedback_progress'\n\n\n# ==============================================================================\n# Update adjudicator scores in bulk\n# ==============================================================================\n\nclass UpdateAdjudicatorScoresView(AdministratorMixin, LogActionMixin, TournamentMixin, FormView):\n template_name = 'update_adjudicator_scores.html'\n form_class = UpdateAdjudicatorScoresForm\n action_log_type = ActionLogEntry.ACTION_TYPE_UPDATE_ADJUDICATOR_SCORES\n\n def get_context_data(self, **kwargs):\n sample_adjs = self.tournament.relevant_adjudicators.all()[:3]\n if len(sample_adjs) == 0:\n kwargs['no_adjs_in_database'] = True\n kwargs['sample'] = [(\"Estella Brandybuck\", 5.0), (\"Pia Hermansson\", 4.0), (\"Lucas Sousa\", 3.5)]\n else:\n kwargs['sample'] = [(adj.name, adj.test_score) for adj in sample_adjs]\n return super().get_context_data(**kwargs)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['tournament'] = self.tournament\n return kwargs\n\n def get_success_url(self):\n return reverse_tournament('adjfeedback-overview', self.tournament)\n\n def form_valid(self, form):\n nupdated = form.save()\n messages.success(self.request, _(\"Updated test scores for %(count)d adjudicators.\") % {'count': nupdated})\n self.log_action()\n return super().form_valid(form)\n", "sub_path": "tabbycat/adjfeedback/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 29844, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 39, "usage_type": "name"}, {"api_name": "utils.views.VueTableTemplateView", "line_number": 39, "usage_type": "name"}, {"api_name": 
"participants.models.Adjudicator.objects.filter", "line_number": 46, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "participants.models.Adjudicator", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 46, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "participants.models.Adjudicator", "line_number": 48, "usage_type": "name"}, {"api_name": "participants.prefetch.populate_feedback_scores", "line_number": 49, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 67, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.get_feedback_overview", "line_number": 111, "usage_type": "call"}, {"api_name": "tables.FeedbackTableBuilder", "line_number": 112, "usage_type": "call"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 118, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 120, "usage_type": "call"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 144, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 144, "usage_type": "name"}, {"api_name": "utils.views.VueTableTemplateView", "line_number": 144, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.tables.TabbycatTableBuilder", "line_number": 151, "usage_type": "call"}, {"api_name": "django.utils.translation.ngettext", "line_number": 157, "usage_type": "call"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 158, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 164, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 164, "usage_type": "name"}, {"api_name": "utils.views.VueTableTemplateView", "line_number": 164, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 167, "usage_type": "call"}, {"api_name": "utils.tables.TabbycatTableBuilder", "line_number": 174, "usage_type": "call"}, {"api_name": "models.AdjudicatorFeedback.objects.filter", "line_number": 179, "usage_type": "call"}, {"api_name": "models.AdjudicatorFeedback.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "models.AdjudicatorFeedback", "line_number": 179, "usage_type": "name"}, {"api_name": "django.utils.translation.ngettext", "line_number": 183, "usage_type": "call"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 184, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 188, "usage_type": "call"}, {"api_name": "utils.tables.TabbycatTableBuilder", "line_number": 191, "usage_type": "call"}, {"api_name": "models.AdjudicatorFeedback.objects.filter", "line_number": 196, "usage_type": "call"}, {"api_name": "models.AdjudicatorFeedback.objects", "line_number": 196, "usage_type": "attribute"}, {"api_name": "models.AdjudicatorFeedback", "line_number": 196, "usage_type": "name"}, {"api_name": "django.utils.translation.ngettext", "line_number": 200, "usage_type": "call"}, {"api_name": "utils.misc.reverse_tournament", 
"line_number": 201, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 205, "usage_type": "call"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 210, "usage_type": "name"}, {"api_name": "prefetch.populate_debate_adjudicators", "line_number": 215, "usage_type": "call"}, {"api_name": "results.prefetch.populate_wins_for_debateteams", "line_number": 216, "usage_type": "call"}, {"api_name": "models.AdjudicatorFeedback.objects.filter", "line_number": 236, "usage_type": "call"}, {"api_name": "models.AdjudicatorFeedback.objects", "line_number": 236, "usage_type": "attribute"}, {"api_name": "models.AdjudicatorFeedback", "line_number": 236, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 237, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 238, "usage_type": "call"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 248, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 248, "usage_type": "name"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 248, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 271, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 272, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 282, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 283, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 289, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 291, "usage_type": "call"}, {"api_name": "tournaments.mixins.SingleObjectFromTournamentMixin", "line_number": 295, "usage_type": "name"}, {"api_name": "participants.models.Adjudicator", "line_number": 321, "usage_type": "name"}, {"api_name": "participants.models.Team", "line_number": 330, "usage_type": "name"}, {"api_name": "participants.models.Adjudicator", "line_number": 338, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 344, "usage_type": "name"}, {"api_name": "utils.views.VueTableTemplateView", "line_number": 344, "usage_type": "name"}, {"api_name": "options.utils.use_team_code_names_data_entry", "line_number": 349, "usage_type": "call"}, {"api_name": "utils.tables.TabbycatTableBuilder", "line_number": 350, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 350, "usage_type": "call"}, {"api_name": "participants.templatetags.team_name_for_data_entry.team_name_for_data_entry", "line_number": 352, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 355, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 362, "usage_type": "call"}, {"api_name": "utils.tables.TabbycatTableBuilder.BLANK_TEXT", "line_number": 363, "usage_type": "attribute"}, {"api_name": "utils.tables.TabbycatTableBuilder", "line_number": 363, "usage_type": "name"}, {"api_name": "participants.models.Adjudicator.objects.filter", "line_number": 366, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects", "line_number": 366, "usage_type": "attribute"}, {"api_name": "participants.models.Adjudicator", "line_number": 366, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 366, "usage_type": "call"}, {"api_name": "utils.tables.TabbycatTableBuilder", "line_number": 370, "usage_type": "call"}, 
{"api_name": "django.utils.translation.gettext", "line_number": 370, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects.filter", "line_number": 372, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects", "line_number": 372, "usage_type": "attribute"}, {"api_name": "participants.models.Adjudicator", "line_number": 372, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 372, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 380, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 387, "usage_type": "call"}, {"api_name": "utils.tables.TabbycatTableBuilder.BLANK_TEXT", "line_number": 388, "usage_type": "attribute"}, {"api_name": "utils.tables.TabbycatTableBuilder", "line_number": 388, "usage_type": "name"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 393, "usage_type": "name"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 401, "usage_type": "call"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 405, "usage_type": "call"}, {"api_name": "utils.mixins.AssistantMixin", "line_number": 409, "usage_type": "name"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 415, "usage_type": "call"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 419, "usage_type": "call"}, {"api_name": "tournaments.mixins.PublicTournamentPageMixin", "line_number": 423, "usage_type": "name"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 434, "usage_type": "call"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 438, "usage_type": "call"}, {"api_name": "actionlog.mixins.LogActionMixin", "line_number": 442, "usage_type": "name"}, {"api_name": "tournaments.mixins.SingleObjectFromTournamentMixin", "line_number": 442, "usage_type": "name"}, {"api_name": "django.views.generic.edit.FormView", "line_number": 442, "usage_type": "name"}, {"api_name": "forms.make_feedback_form_class", "line_number": 451, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator", "line_number": 461, "usage_type": "argument"}, {"api_name": "participants.models.Team", "line_number": 463, "usage_type": "argument"}, {"api_name": "participants.models.Adjudicator", "line_number": 470, "usage_type": "argument"}, {"api_name": "participants.models.Team", "line_number": 472, "usage_type": "argument"}, {"api_name": "results.mixins.TabroomSubmissionFieldsMixin", "line_number": 487, "usage_type": "name"}, {"api_name": "actionlog.models.ActionLogEntry.ACTION_TYPE_FEEDBACK_SAVE", "line_number": 490, "usage_type": "attribute"}, {"api_name": "actionlog.models.ActionLogEntry", "line_number": 490, "usage_type": "name"}, {"api_name": "options.utils.use_team_code_names_data_entry", "line_number": 499, "usage_type": "call"}, {"api_name": "participants.templatetags.team_name_for_data_entry.team_name_for_data_entry", "line_number": 500, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 504, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 504, "usage_type": "name"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 509, "usage_type": "call"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 512, "usage_type": "name"}, {"api_name": "utils.mixins.AssistantMixin", "line_number": 516, "usage_type": "name"}, {"api_name": "results.mixins.PublicSubmissionFieldsMixin", "line_number": 520, "usage_type": "name"}, {"api_name": 
"tournaments.mixins.PublicTournamentPageMixin", "line_number": 520, "usage_type": "name"}, {"api_name": "actionlog.models.ActionLogEntry.ACTION_TYPE_FEEDBACK_SUBMIT", "line_number": 523, "usage_type": "attribute"}, {"api_name": "actionlog.models.ActionLogEntry", "line_number": 523, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 533, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 533, "usage_type": "name"}, {"api_name": "tournaments.models.Round.STATUS_RELEASED", "line_number": 539, "usage_type": "attribute"}, {"api_name": "tournaments.models.Round", "line_number": 539, "usage_type": "name"}, {"api_name": "tournaments.mixins.SingleObjectByRandomisedUrlMixin", "line_number": 543, "usage_type": "name"}, {"api_name": "participants.models.Adjudicator", "line_number": 555, "usage_type": "argument"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 556, "usage_type": "call"}, {"api_name": "participants.models.Team", "line_number": 558, "usage_type": "argument"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 559, "usage_type": "call"}, {"api_name": "options.utils.use_team_code_names", "line_number": 571, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator", "line_number": 579, "usage_type": "argument"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 580, "usage_type": "call"}, {"api_name": "participants.models.Team", "line_number": 582, "usage_type": "argument"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 583, "usage_type": "call"}, {"api_name": "actionlog.mixins.LogActionMixin", "line_number": 593, "usage_type": "name"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 593, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 593, "usage_type": "name"}, {"api_name": "utils.views.PostOnlyRedirectView", "line_number": 593, "usage_type": "name"}, {"api_name": "participants.models.Adjudicator.objects.get", "line_number": 601, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects", "line_number": 601, "usage_type": "attribute"}, {"api_name": "participants.models.Adjudicator", "line_number": 601, "usage_type": "name"}, {"api_name": "participants.models.Adjudicator.DoesNotExist", "line_number": 602, "usage_type": "attribute"}, {"api_name": "participants.models.Adjudicator", "line_number": 602, "usage_type": "name"}, {"api_name": "participants.models.Adjudicator.MultipleObjectsReturned", "line_number": 602, "usage_type": "attribute"}, {"api_name": "django.contrib.messages.error", "line_number": 612, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 612, "usage_type": "name"}, {"api_name": "actionlog.models.ActionLogEntry.ACTION_TYPE_TEST_SCORE_EDIT", "line_number": 619, "usage_type": "attribute"}, {"api_name": "actionlog.models.ActionLogEntry", "line_number": 619, "usage_type": "name"}, {"api_name": "models.AdjudicatorTestScoreHistory", "line_number": 631, "usage_type": "call"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 638, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 638, "usage_type": "name"}, {"api_name": "actionlog.mixins.LogActionMixin", "line_number": 638, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 638, "usage_type": "name"}, {"api_name": "actionlog.models.ActionLogEntry.ACTION_TYPE_ADJUDICATOR_BREAK_SET", "line_number": 640, "usage_type": 
"attribute"}, {"api_name": "actionlog.models.ActionLogEntry", "line_number": 640, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 644, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects.get", "line_number": 645, "usage_type": "call"}, {"api_name": "participants.models.Adjudicator.objects", "line_number": 645, "usage_type": "attribute"}, {"api_name": "participants.models.Adjudicator", "line_number": 645, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 648, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 648, "usage_type": "call"}, {"api_name": "actionlog.models.ActionLogEntry.ACTION_TYPE_ADJUDICATOR_NOTE_SET", "line_number": 653, "usage_type": "attribute"}, {"api_name": "actionlog.models.ActionLogEntry", "line_number": 653, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 665, "usage_type": "name"}, {"api_name": "utils.views.VueTableTemplateView", "line_number": 665, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 667, "usage_type": "call"}, {"api_name": "progress.get_feedback_progress", "line_number": 673, "usage_type": "call"}, {"api_name": "progress.num_unsubmitted", "line_number": 678, "usage_type": "call"}, {"api_name": "tables.FeedbackTableBuilder", "line_number": 684, "usage_type": "call"}, {"api_name": "progress.adjudicator", "line_number": 686, "usage_type": "attribute"}, {"api_name": "tables.FeedbackTableBuilder", "line_number": 690, "usage_type": "call"}, {"api_name": "progress.team", "line_number": 692, "usage_type": "attribute"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 699, "usage_type": "name"}, {"api_name": "tournaments.mixins.PublicTournamentPageMixin", "line_number": 703, "usage_type": "name"}, {"api_name": "utils.mixins.AdministratorMixin", "line_number": 711, "usage_type": "name"}, {"api_name": "actionlog.mixins.LogActionMixin", "line_number": 711, "usage_type": "name"}, {"api_name": "tournaments.mixins.TournamentMixin", "line_number": 711, "usage_type": "name"}, {"api_name": "django.views.generic.edit.FormView", "line_number": 711, "usage_type": "name"}, {"api_name": "forms.UpdateAdjudicatorScoresForm", "line_number": 713, "usage_type": "name"}, {"api_name": "actionlog.models.ActionLogEntry.ACTION_TYPE_UPDATE_ADJUDICATOR_SCORES", "line_number": 714, "usage_type": "attribute"}, {"api_name": "actionlog.models.ActionLogEntry", "line_number": 714, "usage_type": "name"}, {"api_name": "utils.misc.reverse_tournament", "line_number": 731, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 735, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 735, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 735, "usage_type": "call"}]} +{"seq_id": "72938249", "text": "#!/usr/bin/env python\n\"\"\"\nfitting GUI for XRF display\n\"\"\"\nimport time\nimport copy\nfrom functools import partial\nfrom collections import OrderedDict\n\nfrom threading import Thread\n\nimport json\nimport numpy as np\nimport wx\nimport wx.lib.agw.pycollapsiblepane as CP\nimport wx.lib.scrolledpanel as scrolled\nimport wx.dataview as dv\nDVSTYLE = dv.DV_SINGLE|dv.DV_VERT_RULES|dv.DV_ROW_LINES\n\nfrom peakutils import peak\n\nfrom lmfit import Parameter, Minimizer\nfrom lmfit.printfuncs import gformat\n\nfrom wxutils import (SimpleText, FloatCtrl, FloatSpin, Choice, Font, pack,\n Button, Check, HLine, GridPanel, RowPanel, 
CEN, LEFT,\n RIGHT, FileSave, GUIColors, RCEN, LCEN, FRAMESTYLE,\n BitmapButton, SetTip, GridPanel, Popup,\n FloatSpinWithPin, get_icon, fix_filename)\n\nfrom . import FONTSIZE\nfrom xraydb import (material_mu, xray_edge, materials, add_material,\n atomic_number, atomic_symbol, xray_line)\nfrom .notebooks import flatnotebook\nfrom .parameter import ParameterPanel\nfrom .periodictable import PeriodicTablePanel\n\nfrom larch import Group\n\nfrom ..xrf import xrf_background, MCA, FanoFactors\nfrom ..utils.jsonutils import encode4js, decode4js\n\nfrom .xrfdisplay_utils import XRFGROUP, mcaname\n\ndef read_filterdata(flist, _larch):\n \"\"\" read filters data\"\"\"\n materials = _larch.symtable.get_symbol('_xray._materials')\n out = OrderedDict()\n out['None'] = ('', 0)\n for name in flist:\n if name in materials:\n out[name] = materials[name]\n return out\n\ndef VarChoice(p, default=0, size=(75, -1)):\n return Choice(p, choices=['Fix', 'Vary'],\n size=size, default=default)\n\nNFILTERS = 4\nMATRIXLAYERNAMES = ('top', 'middle', 'bottom')\nNMATRIX = len(MATRIXLAYERNAMES)\nMIN_CORREL = 0.10\n\ntooltips = {'ptable': 'Select Elements to include in model',\n 'step': 'size of step extending to low energy side of peak, fraction of peak height',\n 'gamma': 'gamma (lorentzian-like weight) of Voigt function',\n 'tail': 'intensity of tail function at low energy side of peak',\n 'beta': 'width of tail function at low energy side of peak',\n 'sigmax': 'scale sigma from Energy/Noise by this amount',\n }\n\nCompositionUnits = ('ng/mm^2', 'wt %', 'ppm')\n\nDetector_Materials = ['Si', 'Ge']\nEFano_Text = 'Peak Widths: sigma = sqrt(E_Fano * Energy + Noise**2) '\nGeom_Text = 'Angles in degrees: 90=normal to surface, 0=grazing surface'\nEnergy_Text = 'All energies in keV'\n\nmca_init = \"\"\"\nif not hasattr({group:s}, 'fit_history'): {group:s}.fit_history = []\n\"\"\"\n\nxrfmod_setup = \"\"\"## Set up XRF Model\n_xrfmodel = xrf_model(xray_energy={en_xray:.2f}, count_time={count_time:.5f},\n energy_min={en_min:.2f}, energy_max={en_max:.2f})\n\n_xrfmodel.set_detector(thickness={det_thk:.5f}, material='{det_mat:s}',\n cal_offset={cal_offset:.5f}, cal_slope={cal_slope:.5f},\n vary_cal_offset={cal_vary!r}, vary_cal_slope={cal_vary!r},\n peak_step={peak_step:.5f}, vary_peak_step={peak_step_vary:s},\n peak_tail={peak_tail:.5f}, vary_peak_tail={peak_tail_vary:s},\n peak_beta={peak_beta:.5f}, vary_peak_beta={peak_beta_vary:s},\n peak_gamma={peak_gamma:.5f}, vary_peak_gamma={peak_gamma_vary:s},\n noise={det_noise:.5f}, vary_noise={det_noise_vary:s})\n\"\"\"\n\nxrfmod_scattpeak = \"\"\"_xrfmodel.add_scatter_peak(name='{peakname:s}', center={_cen:.2f},\n amplitude=1e5, step={_step:.5f}, tail={_tail:.5f}, beta={_beta:.5f},\n sigmax={_sigma:.5f}, vary_center={vcen:s}, vary_step={vstep:s},\n vary_tail={vtail:s}, vary_beta={vbeta:s}, vary_sigmax={vsigma:s})\n\"\"\"\n\nxrfmod_fitscript = \"\"\"\n_xrfmodel.fit_spectrum({group:s}.energy, {group:s}.counts,\n energy_min={emin:.2f}, energy_max={emax:.2f})\n_xrfresult = _xrfmodel.compile_fitresults()\n\"\"\"\n\nxrfmod_filter = \"_xrfmodel.add_filter('{name:s}', {thick:.5f}, vary_thickness={vary:s})\"\nxrfmod_matrix = \"_xrfmodel.add_matrix_layer('{name:s}', {thick:.5f}, density={density:.5f})\"\n\nxrfmod_bgr = \"\"\"xrf_background(energy={group:s}.energy, counts={group:s}.counts,\n group={group:s}, width={bgr_wid:.2f}, exponent={bgr_exp:.2f})\n_xrfmodel.add_background({group:s}.bgr, vary=False)\n\"\"\"\n\nxrfmod_jsondump = \"\"\"# save xrf model to json\n_o = 
copy(group2dict({group:s}.fit_history[{nfit:d}]))\n_o['params'] = _o.pop('params').dumps()\njson_dump(_o, '{filename:s}')\n\"\"\"\n\nxrfmod_pileup = \"_xrfmodel.add_pileup(scale={scale:.3f}, vary={vary:s})\"\nxrfmod_escape = \"_xrfmodel.add_escape(scale={scale:.3f}, vary={vary:s})\"\n\nxrfmod_elems = \"\"\"\nfor atsym in {elemlist:s}:\n _xrfmodel.add_element(atsym)\n#endfor\n\"\"\"\n\nFilter_Lengths = ['microns', 'mm', 'cm']\nFilter_Materials = ['None', 'air', 'nitrogen', 'helium', 'kapton',\n 'beryllium', 'aluminum', 'mylar', 'pmma']\n\n\nclass FitSpectraFrame(wx.Frame):\n \"\"\"Frame for Spectral Analysis\"\"\"\n\n def __init__(self, parent, size=(655, 780)):\n self.parent = parent\n self._larch = parent.larch\n\n # fetch current spectra from parent\n xrfgroup = self._larch.symtable.get_group(XRFGROUP)\n mcagroup = getattr(xrfgroup, '_mca')\n self.mca = getattr(xrfgroup, mcagroup)\n self.mcagroup = '%s.%s' % (XRFGROUP, mcagroup)\n\n efactor = 1.0 if max(self.mca.energy) < 250. else 1000.0\n\n if self.mca.incident_energy is None:\n self.mca.incident_energy = 20.0\n if self.mca.incident_energy > 250:\n self.mca.incident_energy /= 1000.0\n\n self._larch.eval(mca_init.format(group=self.mcagroup))\n self.fit_history = getattr(self.mca, 'fit_history', [])\n self.nfit = 0\n self.colors = GUIColors()\n wx.Frame.__init__(self, parent, -1, 'Fit XRF Spectra',\n size=size, style=wx.DEFAULT_FRAME_STYLE)\n\n self.wids = {}\n self.owids = {}\n self.result_frame = None\n self.panels = {}\n self.panels['Beam & Detector'] = self.beamdet_page\n self.panels['Filters & Matrix'] = self.materials_page\n self.panels['Elements & Peaks'] = self.elempeaks_page\n self.panels['Fit Results'] = self.fitresult_page\n self.panels['Composition'] = self.composition_page\n\n self.nb = flatnotebook(self, self.panels,\n on_change=self.onNBChanged)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.nb, 1, wx.ALL|wx.EXPAND)\n\n bpanel = wx.Panel(self)\n self.SetBackgroundColour((235, 235, 235))\n bsizer = wx.BoxSizer(wx.HORIZONTAL)\n bsizer.Add(Button(bpanel, 'Calculate Model', size=(200, -1),\n action=self.onShowModel), 0, LEFT)\n bsizer.Add(Button(bpanel, 'Fit Model', size=(200, -1),\n action=self.onFitModel), 0, LEFT)\n\n pack(bpanel, bsizer)\n sizer.Add(bpanel, 0, CEN)\n sizer.Add((5,5))\n pack(self, sizer)\n self.Show()\n self.Raise()\n\n def onNBChanged(self, event=None):\n pagelabel = self.nb._pages.GetPageText(event.GetSelection()).strip()\n if pagelabel.startswith('Composition'):\n self.UpdateCompositionPage()\n\n def elempeaks_page(self, **kws):\n \"elements and peaks parameters\"\n mca = self.parent.mca\n wids = self.wids\n p = GridPanel(self)\n self.selected_elems = []\n self.ptable = PeriodicTablePanel(p, multi_select=True, fontsize=12,\n tooltip_msg=tooltips['ptable'],\n onselect=self.onElemSelect)\n\n dstep, dtail, dbeta, dgamma = 0.05, 0.10, 0.5, 0.05\n wids['peak_step'] = FloatSpin(p, value=dstep, digits=3, min_val=0,\n max_val=1.0, increment=0.01,\n tooltip=tooltips['step'])\n wids['peak_gamma'] = FloatSpin(p, value=dgamma, digits=3, min_val=0,\n max_val=10.0, increment=0.01,\n tooltip=tooltips['gamma'])\n wids['peak_tail'] = FloatSpin(p, value=dtail, digits=3, min_val=0,\n max_val=1.0, increment=0.05,\n tooltip=tooltips['tail'])\n\n wids['peak_beta'] = FloatSpin(p, value=dbeta, digits=3, min_val=0,\n max_val=10.0, increment=0.01,\n tooltip=tooltips['beta'])\n wids['peak_step_vary'] = VarChoice(p, default=0)\n wids['peak_tail_vary'] = VarChoice(p, default=0)\n wids['peak_gamma_vary'] = VarChoice(p, 
default=0)\n wids['peak_beta_vary'] = VarChoice(p, default=0)\n\n\n btn_from_peaks = Button(p, 'Guess Peaks', size=(150, -1),\n action=self.onElems_GuessPeaks)\n # tooltip='Guess elements from peak locations')\n btn_from_rois = Button(p, 'Use ROIS as Peaks', size=(150, -1),\n action=self.onElems_FromROIS)\n btn_clear_elems = Button(p, 'Clear All Peaks', size=(150, -1),\n action=self.onElems_Clear)\n wx.CallAfter(self.onElems_GuessPeaks)\n\n p.AddText('Elements to model:', colour='#880000', dcol=2)\n p.Add((2, 2), newrow=True)\n p.Add(self.ptable, dcol=5, drow=5)\n irow = p.irow\n\n p.Add(btn_from_peaks, icol=6, dcol=2, irow=irow)\n p.Add(btn_from_rois, icol=6, dcol=2, irow=irow+1)\n p.Add(btn_clear_elems, icol=6, dcol=2, irow=irow+2)\n p.irow += 5\n\n p.Add((2, 2), newrow=True)\n p.AddText(' Step: ', tooltip=tooltips['step'])\n p.Add(wids['peak_step'])\n p.Add(wids['peak_step_vary'])\n\n p.AddText(' Gamma : ')\n p.Add(wids['peak_gamma'])\n p.Add(wids['peak_gamma_vary'])\n\n p.Add((2, 2), newrow=True)\n p.AddText(' Beta: ')\n p.Add(wids['peak_beta'])\n p.Add(wids['peak_beta_vary'])\n\n p.AddText(' Tail: ')\n p.Add(wids['peak_tail'])\n p.Add(wids['peak_tail_vary'])\n\n p.Add((2, 2), newrow=True)\n p.Add(HLine(p, size=(550, 3)), dcol=8)\n p.Add((2, 2), newrow=True)\n\n # name, escale, step, sigmax, beta, tail\n scatter_peaks = (('Elastic', 1.00, 0.05, 1.0, 0.5, 0.10),\n ('Compton1', 0.97, 0.05, 1.5, 2.0, 0.25),\n ('Compton2', 0.94, 0.05, 2.0, 2.5, 0.25))\n opts = dict(size=(100, -1), min_val=0, digits=4, increment=0.010)\n for name, escale, dstep, dsigma, dbeta, dtail in scatter_peaks:\n en = escale * self.mca.incident_energy\n t = name.lower()\n vary_en = 1 if t.startswith('compton') else 0\n\n wids['%s_use'%t] = Check(p, label='Include', default=True)\n wids['%s_cen_vary'%t] = VarChoice(p, default=vary_en)\n wids['%s_step_vary'%t] = VarChoice(p, default=0)\n wids['%s_beta_vary'%t] = VarChoice(p, default=0)\n wids['%s_tail_vary'%t] = VarChoice(p, default=0)\n wids['%s_sigma_vary'%t] = VarChoice(p, default=0)\n\n wids['%s_cen'%t] = FloatSpin(p, value=en, digits=3, min_val=0,\n increment=0.01)\n wids['%s_step'%t] = FloatSpin(p, value=dstep, digits=3, min_val=0,\n max_val=1.0, increment=0.01,\n tooltip=tooltips['step'])\n wids['%s_tail'%t] = FloatSpin(p, value=dtail, digits=3, min_val=0,\n max_val=1.0, increment=0.05,\n tooltip=tooltips['tail'])\n wids['%s_beta'%t] = FloatSpin(p, value=dbeta, digits=3, min_val=0,\n max_val=10.0, increment=0.10,\n tooltip=tooltips['beta'])\n wids['%s_sigma'%t] = FloatSpin(p, value=dsigma, digits=3, min_val=0,\n max_val=10.0, increment=0.05,\n tooltip=tooltips['sigmax'])\n\n p.Add((2, 2), newrow=True)\n p.AddText(\" %s Peak:\" % name, colour='#880000')\n p.Add(wids['%s_use' % t], dcol=2)\n\n p.AddText(' Energy (keV): ')\n p.Add(wids['%s_cen'%t])\n p.Add(wids['%s_cen_vary'%t])\n\n p.Add((2, 2), newrow=True)\n p.AddText(' Step: ')\n p.Add(wids['%s_step'%t])\n p.Add(wids['%s_step_vary'%t])\n\n p.AddText(' Sigma Scale : ')\n p.Add(wids['%s_sigma'%t])\n p.Add(wids['%s_sigma_vary'%t])\n\n p.Add((2, 2), newrow=True)\n p.AddText(' Beta : ')\n p.Add(wids['%s_beta'%t])\n p.Add(wids['%s_beta_vary'%t])\n\n p.AddText(' Tail: ')\n p.Add(wids['%s_tail'%t])\n p.Add(wids['%s_tail_vary'%t])\n\n p.Add((2, 2), newrow=True)\n p.Add(HLine(p, size=(550, 3)), dcol=7)\n\n p.pack()\n return p\n\n def beamdet_page(self, **kws):\n \"beam / detector settings\"\n mca = self.mca\n en_min = 2.0\n en_max = self.mca.incident_energy\n\n cal_offset = getattr(mca, 'offset', 0)\n cal_slope = 
getattr(mca, 'slope', 0.010)\n det_noise = getattr(mca, 'det_noise', 0.035)\n width = getattr(mca, 'bgr_width', 3000)\n expon = getattr(mca, 'bgr_exponent', 2)\n escape_amp = getattr(mca, 'escape_amp', 1.0)\n pileup_amp = getattr(mca, 'pileup_amp', 0.1)\n\n wids = self.wids\n # main = wx.Panel(self)\n pdet = GridPanel(self, itemstyle=LEFT)\n\n def addLine(pan):\n pan.Add(HLine(pan, size=(600, 3)), dcol=6, newrow=True)\n\n bgr_code1 = \"\"\"\n wids['bgr_use'] = Check(pdet, label='Include Background in Fit',\n default=False, action=self.onUseBackground)\n wids['bgr_width'] = FloatSpin(pdet, value=width, min_val=0, max_val=15000,\n digits=0, increment=500, size=(100, -1))\n wids['bgr_expon'] = Choice(pdet, choices=['2', '4', '6'],\n size=(70, -1), default=0)\n wids['bgr_show'] = Button(pdet, 'Show', size=(80, -1),\n action=self.onShowBgr)\n wids['bgr_width'].Disable()\n wids['bgr_expon'].Disable()\n wids['bgr_show'].Disable()\n \"\"\"\n\n wids['escape_use'] = Check(pdet, label='Include Escape in Fit',\n default=True, action=self.onUsePileupEscape)\n wids['escape_amp'] = FloatSpin(pdet, value=escape_amp,\n min_val=0, max_val=100, digits=2,\n increment=0.02, size=(100, -1))\n\n wids['pileup_use'] = Check(pdet, label='Include Pileup in Fit',\n default=True, action=self.onUsePileupEscape)\n wids['pileup_amp'] = FloatSpin(pdet, value=pileup_amp,\n min_val=0, max_val=100, digits=2,\n increment=0.02, size=(100, -1))\n\n wids['escape_amp_vary'] = VarChoice(pdet, default=True)\n wids['pileup_amp_vary'] = VarChoice(pdet, default=True)\n\n\n wids['cal_slope'] = FloatSpin(pdet, value=cal_slope,\n min_val=0, max_val=100,\n digits=4, increment=0.01, size=(100, -1))\n wids['cal_offset'] = FloatSpin(pdet, value=cal_offset,\n min_val=-500, max_val=500,\n digits=4, increment=0.01, size=(100, -1))\n\n wids['cal_vary'] = Check(pdet, label='Vary Calibration in Fit', default=True)\n\n wids['det_mat'] = Choice(pdet, choices=Detector_Materials,\n size=(70, -1), default=0,\n action=self.onDetMaterial)\n\n wids['det_thk'] = FloatSpin(pdet, value=0.400, size=(100, -1),\n increment=0.010, min_val=0, max_val=10,\n digits=4)\n\n wids['det_noise_vary'] = VarChoice(pdet, default=1)\n\n opts = dict(size=(100, -1), min_val=0, max_val=250000,\n digits=2, increment=50)\n wids['en_xray'] = FloatSpin(pdet, value=self.mca.incident_energy,\n action=self.onSetXrayEnergy, **opts)\n wids['en_min'] = FloatSpin(pdet, value=en_min, **opts)\n wids['en_max'] = FloatSpin(pdet, value=en_max, **opts)\n\n opts.update({'digits': 3, 'max_val': 500, 'increment': 1})\n wids['det_noise'] = FloatSpin(pdet, value=det_noise, **opts)\n wids['det_efano'] = SimpleText(pdet, size=(200, -1),\n label='E_Fano= %.4e' % FanoFactors['Si'])\n\n opts.update(digits=1, max_val=90, min_val=0, increment=1)\n wids['angle_in'] = FloatSpin(pdet, value=45, **opts)\n wids['angle_out'] = FloatSpin(pdet, value=45, **opts)\n\n pdet.AddText(' Beam Energy, Fit Range :', colour='#880000', dcol=2)\n pdet.AddText(Energy_Text, dcol=2)\n pdet.AddText(' X-ray Energy: ', newrow=True)\n pdet.Add(wids['en_xray'])\n pdet.AddText(' Energy Min: ', newrow=True)\n pdet.Add(wids['en_min'])\n pdet.AddText('Energy Max: ')\n pdet.Add(wids['en_max'])\n\n addLine(pdet)\n pdet.AddText(' Energy Calibration :', colour='#880000', dcol=2, newrow=True)\n pdet.Add(wids['cal_vary'], dcol=2)\n pdet.AddText(' Offset (keV): ', newrow=True)\n pdet.Add(wids['cal_offset'])\n pdet.AddText('Slope (keV/bin): ')\n pdet.Add(wids['cal_slope'])\n\n addLine(pdet)\n pdet.AddText(' Detector :', colour='#880000', 
newrow=True)\n pdet.AddText(EFano_Text, dcol=3)\n pdet.AddText(' Material: ', newrow=True)\n pdet.Add(wids['det_mat'])\n pdet.Add(wids['det_efano'], dcol=2)\n pdet.AddText(' Thickness (mm): ', newrow=True)\n pdet.Add(wids['det_thk'])\n pdet.AddText(' Noise (keV): ', newrow=True)\n pdet.Add(wids['det_noise'])\n pdet.Add(wids['det_noise_vary'], dcol=2)\n\n addLine(pdet)\n pdet.AddText(' Escape && Pileup:', colour='#880000', newrow=True)\n pdet.AddText(' Escape Scale:', newrow=True)\n pdet.Add(wids['escape_amp'])\n pdet.Add(wids['escape_amp_vary'])\n pdet.Add(wids['escape_use'], dcol=3)\n\n pdet.AddText(' Pileup Scale:', newrow=True)\n pdet.Add(wids['pileup_amp'])\n pdet.Add(wids['pileup_amp_vary'])\n pdet.Add(wids['pileup_use'], dcol=3)\n\n\n addLine(pdet)\n pdet.AddText(' Geometry:', colour='#880000', newrow=True)\n pdet.AddText(Geom_Text, dcol=3)\n pdet.AddText(' Incident Angle:', newrow=True)\n pdet.Add(wids['angle_in'])\n pdet.AddText(' Exit Angle:', newrow=False)\n pdet.Add(wids['angle_out'])\n\n addLine(pdet)\n pdet.pack()\n return pdet\n\n def materials_page(self, **kws):\n \"filters and matrix settings\"\n wids = self.wids\n pan = GridPanel(self, itemstyle=LEFT)\n\n pan.AddText(' Filters :', colour='#880000', dcol=2) # , newrow=True)\n pan.AddManyText((' Filter #', 'Material', 'Thickness (mm)',\n 'Vary Thickness'), style=CEN, newrow=True)\n opts = dict(size=(100, -1), min_val=0, digits=5, increment=0.005)\n\n for i in range(NFILTERS):\n t = 'filter%d' % (i+1)\n wids['%s_mat'%t] = Choice(pan, choices=Filter_Materials, default=0,\n size=(150, -1),\n action=partial(self.onFilterMaterial, index=i+1))\n wids['%s_thk'%t] = FloatSpin(pan, value=0.0, **opts)\n wids['%s_var'%t] = VarChoice(pan, default=0)\n if i == 0: # first selection\n wids['%s_mat'%t].SetStringSelection('beryllium')\n wids['%s_thk'%t].SetValue(0.0250)\n elif i == 1: # second selection\n wids['%s_mat'%t].SetStringSelection('air')\n wids['%s_thk'%t].SetValue(50.00)\n elif i == 2: # third selection\n wids['%s_mat'%t].SetStringSelection('kapton')\n wids['%s_thk'%t].SetValue(0.00)\n elif i == 3: # fourth selection\n wids['%s_mat'%t].SetStringSelection('aluminum')\n wids['%s_thk'%t].SetValue(0.00)\n\n pan.AddText(' # %i ' % (i+1), newrow=True)\n pan.Add(wids['%s_mat' % t])\n pan.Add(wids['%s_thk' % t])\n pan.Add(wids['%s_var' % t])\n\n pan.Add(HLine(pan, size=(600, 3)), dcol=6, newrow=True)\n\n pan.AddText(' Matrix Layers:', colour='#880000', dcol=2, newrow=True)\n pan.AddManyText((' Layer', 'Material/Formula',\n 'Thickness (mm)', 'Density (g/cm^3)'), style=CEN, newrow=True)\n\n for i in range(NMATRIX):\n t = 'matrix%d' % (i+1)\n wids['%s_mat'%t] = wx.TextCtrl(pan, value='', size=(150, -1))\n wids['%s_thk'%t] = FloatSpin(pan, value=0.0, **opts)\n wids['%s_den'%t] = FloatSpin(pan, value=1.0, **opts)\n wids['%s_btn'%t] = Button(pan, 'Use Material', size=(175, -1),\n action=partial(self.onUseCurrentMaterialAsFilter,\n layer=i+1))\n wids['%s_btn'%t].Disable()\n pan.AddText(' %s' % (MATRIXLAYERNAMES[i]), style=LCEN, newrow=True)\n pan.Add(wids['%s_mat' % t])\n pan.Add(wids['%s_thk' % t])\n pan.Add(wids['%s_den' % t])\n pan.Add(wids['%s_btn' % t])\n\n pan.Add(HLine(pan, size=(600, 3)), dcol=6, newrow=True)\n\n # Materials\n pan.AddText(' Known Materials:', colour='#880000', dcol=4, newrow=True)\n bx = Button(pan, 'Update Filter List', size=(175, -1),\n action=self.onUpdateFilterList)\n pan.Add(bx)\n\n mview = self.owids['materials'] = dv.DataViewListCtrl(pan, style=DVSTYLE)\n mview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, 
self.onSelectMaterial)\n self.selected_material = ''\n\n mview.AppendTextColumn('Name', width=150)\n mview.AppendTextColumn('Formula', width=325)\n mview.AppendTextColumn('density', width=90)\n mview.AppendToggleColumn('Filter?', width=50)\n for col in range(4):\n this = mview.Columns[col]\n align = wx.ALIGN_LEFT\n this.Sortable = True\n this.Alignment = this.Renderer.Alignment = align\n\n mview.SetMinSize((650, 170))\n mview.DeleteAllItems()\n self.materials_data = {}\n for name, data in materials._read_materials_db().items():\n formula, density = data\n self.materials_data[name] = (formula, density)\n mview.AppendItem((name, formula, \"%9.6f\"%density,\n name in Filter_Materials))\n pan.Add(mview, dcol=5, newrow=True)\n\n pan.AddText(' Add Material:', colour='#880000', dcol=2, newrow=True)\n self.owids['newmat_name'] = wx.TextCtrl(pan, value='', size=(150, -1))\n self.owids['newmat_dens'] = FloatSpin(pan, value=1.0, **opts)\n self.owids['newmat_form'] = wx.TextCtrl(pan, value='', size=(400, -1))\n\n pan.AddText(' Name:', newrow=True)\n pan.Add(self.owids['newmat_name'])\n pan.AddText(' Density:', newrow=False)\n pan.Add(self.owids['newmat_dens'])\n pan.AddText(' gr/cm^3', newrow=False)\n pan.AddText(' Formula:', newrow=True)\n pan.Add(self.owids['newmat_form'], dcol=3)\n pan.Add(Button(pan, 'Add Material', size=(175, -1),\n action=self.onAddMaterial))\n pan.pack()\n return pan\n\n def fitresult_page(self, **kws):\n sizer = wx.GridBagSizer(10, 5)\n panel = scrolled.ScrolledPanel(self)\n # title row\n wids = self.owids\n title = SimpleText(panel, 'Fit Results', font=Font(FONTSIZE+1),\n colour=self.colors.title, style=LCEN)\n\n wids['data_title'] = SimpleText(panel, '< > ', font=Font(FONTSIZE+1),\n colour=self.colors.title, style=LCEN)\n\n wids['fitlabel_lab'] = SimpleText(panel, ' Fit Label: ')\n wids['fitlabel_txt'] = wx.TextCtrl(panel, -1, ' ', size=(150, -1))\n wids['fitlabel_btn'] = Button(panel, 'Set Label', size=(150, -1),\n action=self.onChangeFitLabel)\n\n opts = dict(default=False, size=(175, -1), action=self.onPlot)\n wids['plot_comps'] = Check(panel, label='Show Components?', **opts)\n self.plot_choice = Button(panel, 'Plot',\n size=(150, -1), action=self.onPlot)\n\n self.save_result = Button(panel, 'Save Model',\n size=(150, -1), action=self.onSaveFitResult)\n SetTip(self.save_result, 'save model and result to be loaded later')\n\n self.export_fit = Button(panel, 'Export Fit',\n size=(150, -1), action=self.onExportFitResult)\n SetTip(self.export_fit, 'save arrays and results to text file')\n\n irow = 0\n sizer.Add(title, (irow, 0), (1, 1), LCEN)\n sizer.Add(wids['data_title'], (irow, 1), (1, 3), LCEN)\n\n irow += 1\n sizer.Add(self.save_result, (irow, 0), (1, 1), LCEN)\n sizer.Add(self.export_fit, (irow, 1), (1, 1), LCEN)\n sizer.Add(self.plot_choice, (irow, 2), (1, 1), LCEN)\n sizer.Add(wids['plot_comps'], (irow, 3), (1, 1), LCEN)\n\n irow += 1\n sizer.Add(wids['fitlabel_lab'], (irow, 0), (1, 1), LCEN)\n sizer.Add(wids['fitlabel_txt'], (irow, 1), (1, 1), LCEN)\n sizer.Add(wids['fitlabel_btn'], (irow, 2), (1, 2), LCEN)\n\n\n irow += 1\n sizer.Add(HLine(panel, size=(625, 3)), (irow, 0), (1, 5), LCEN)\n\n irow += 1\n title = SimpleText(panel, '[[Fit Statistics]]', font=Font(FONTSIZE+1),\n colour=self.colors.title, style=LCEN)\n sizer.Add(title, (irow, 0), (1, 4), LCEN)\n\n sview = wids['stats'] = dv.DataViewListCtrl(panel, style=DVSTYLE)\n sview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectFit)\n sview.AppendTextColumn(' Fit Label', width=90)\n sview.AppendTextColumn(' 
N_vary', width=65)\n sview.AppendTextColumn(' N_eval', width=65)\n sview.AppendTextColumn(' \\u03c7\\u00B2', width=125)\n sview.AppendTextColumn(' \\u03c7\\u00B2_reduced', width=125)\n sview.AppendTextColumn(' Akaike Info', width=125)\n\n for col in range(sview.ColumnCount):\n this = sview.Columns[col]\n isort, align = True, wx.ALIGN_RIGHT\n if col == 0:\n align = wx.ALIGN_CENTER\n this.Sortable = isort\n this.Alignment = this.Renderer.Alignment = align\n sview.SetMinSize((650, 150))\n\n irow += 1\n sizer.Add(sview, (irow, 0), (1, 5), LCEN)\n\n irow += 1\n sizer.Add(HLine(panel, size=(625, 3)), (irow, 0), (1, 5), LCEN)\n\n irow += 1\n title = SimpleText(panel, '[[Variables]]', font=Font(FONTSIZE+1),\n colour=self.colors.title, style=LCEN)\n sizer.Add(title, (irow, 0), (1, 1), LCEN)\n\n pview = wids['params'] = dv.DataViewListCtrl(panel, style=DVSTYLE)\n wids['paramsdata'] = []\n pview.AppendTextColumn('Parameter', width=150)\n pview.AppendTextColumn('Refined Value', width=100)\n pview.AppendTextColumn('Standard Error', width=100)\n pview.AppendTextColumn('% Uncertainty', width=100)\n pview.AppendTextColumn('Initial Value', width=150)\n\n for col in range(4):\n this = pview.Columns[col]\n align = wx.ALIGN_LEFT\n if col > 0:\n align = wx.ALIGN_RIGHT\n this.Sortable = False\n this.Alignment = this.Renderer.Alignment = align\n\n pview.SetMinSize((650, 200))\n pview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectParameter)\n\n irow += 1\n sizer.Add(pview, (irow, 0), (1, 5), LCEN)\n\n irow += 1\n sizer.Add(HLine(panel, size=(625, 3)), (irow, 0), (1, 5), LCEN)\n\n irow += 1\n title = SimpleText(panel, '[[Correlations]]', font=Font(FONTSIZE+1),\n colour=self.colors.title, style=LCEN)\n\n wids['all_correl'] = Button(panel, 'Show All',\n size=(100, -1), action=self.onAllCorrel)\n\n wids['min_correl'] = FloatSpin(panel, value=MIN_CORREL,\n min_val=0, size=(100, -1),\n digits=3, increment=0.1)\n\n ctitle = SimpleText(panel, 'minimum correlation: ')\n sizer.Add(title, (irow, 0), (1, 1), LCEN)\n sizer.Add(ctitle, (irow, 1), (1, 1), LCEN)\n sizer.Add(wids['min_correl'], (irow, 2), (1, 1), LCEN)\n sizer.Add(wids['all_correl'], (irow, 3), (1, 1), LCEN)\n\n cview = wids['correl'] = dv.DataViewListCtrl(panel, style=DVSTYLE)\n\n cview.AppendTextColumn('Parameter 1', width=150)\n cview.AppendTextColumn('Parameter 2', width=150)\n cview.AppendTextColumn('Correlation', width=150)\n\n for col in (0, 1, 2):\n this = cview.Columns[col]\n this.Sortable = False\n align = wx.ALIGN_LEFT\n if col == 2:\n align = wx.ALIGN_RIGHT\n this.Alignment = this.Renderer.Alignment = align\n cview.SetMinSize((600, 125))\n\n irow += 1\n sizer.Add(cview, (irow, 0), (1, 5), LCEN)\n\n pack(panel, sizer)\n panel.SetupScrolling()\n return panel\n\n def composition_page(self, **kws):\n sizer = wx.GridBagSizer(10, 5)\n panel = scrolled.ScrolledPanel(self)\n wids = self.owids\n title = SimpleText(panel, 'Composition Results', font=Font(FONTSIZE+1),\n colour=self.colors.title, style=LCEN)\n wids['data_title2'] = SimpleText(panel, '< > ', font=Font(FONTSIZE+1),\n colour=self.colors.title, style=LCEN)\n\n cview = wids['composition'] = dv.DataViewListCtrl(panel, style=DVSTYLE)\n cview.AppendTextColumn(' Z ', width=50)\n cview.AppendTextColumn(' Element ', width=100)\n cview.AppendTextColumn(' Amplitude', width=150)\n cview.AppendTextColumn(' Concentration', width=150)\n cview.AppendTextColumn(' Uncertainty', width=150)\n\n for col in range(5):\n this = cview.Columns[col]\n align = wx.ALIGN_RIGHT\n if col == 1:\n align = wx.ALIGN_LEFT\n 
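# all columns are sortable; column 1 (Element) is left-aligned, the numeric columns right-aligned\n 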
this.Sortable = True\n this.Alignment = this.Renderer.Alignment = align\n\n cview.SetMinSize((650, 400))\n wids['comp_fitlabel'] = Choice(panel, choices=[''], size=(175, -1),\n action=self.onCompSelectFit)\n\n self.compscale_lock = 0.0\n wids['comp_elemchoice'] = Choice(panel, choices=[''], size=(100, -1),\n action=self.onCompSetElemAbundance)\n wids['comp_elemscale'] = FloatSpin(panel, value=1.0, digits=6, min_val=0,\n increment=0.01,\n action=self.onCompSetElemAbundance)\n wids['comp_units'] = Choice(panel, choices=CompositionUnits, size=(100, -1))\n wids['comp_scalevalue'] = FloatCtrl(panel, value=0, size=(200, -1),\n action=self.onCompSetScale)\n\n wids['comp_save'] = Button(panel, 'Save This Concentration Data',\n size=(200, -1), action=self.onCompSave)\n\n irow = 0\n sizer.Add(title, (irow, 0), (1, 2), LCEN)\n sizer.Add(wids['data_title2'], (irow, 2), (1, 5), LCEN)\n irow += 1\n sizer.Add(SimpleText(panel, 'Fit Label:'), (irow, 0), (1, 1), LCEN)\n sizer.Add(wids['comp_fitlabel'], (irow, 1), (1, 5), LCEN)\n\n irow += 1\n sizer.Add(SimpleText(panel, 'Scale Element:'), (irow, 0), (1, 1), LCEN)\n sizer.Add(wids['comp_elemchoice'], (irow, 1), (1, 1), LCEN)\n sizer.Add(SimpleText(panel, ' to:'), (irow, 2), (1, 1), LCEN)\n sizer.Add(wids['comp_elemscale'], (irow, 3), (1, 1), LCEN)\n sizer.Add(wids['comp_units'], (irow, 4), (1, 1), LCEN)\n\n irow += 1\n sizer.Add(SimpleText(panel, 'Scaling Factor:'), (irow, 0), (1, 1), LCEN)\n sizer.Add(wids['comp_scalevalue'], (irow, 1), (1, 3), LCEN)\n\n irow += 1\n sizer.Add(wids['composition'], (irow, 0), (3, 6), LCEN)\n\n irow += 3\n sizer.Add(wids['comp_save'], (irow, 0), (1, 3), LCEN)\n\n pack(panel, sizer)\n panel.SetupScrolling()\n return panel\n\n def onCompSetScale(self, event=None, value=None):\n if len(self.fit_history) < 1 or (time.time() - self.compscale_lock) < 0.25:\n return\n self.compscale_lock = time.time()\n owids = self.owids\n result = self.get_fitresult(nfit=owids['comp_fitlabel'].GetSelection())\n cur_elem = owids['comp_elemchoice'].GetStringSelection()\n conc_vals = {}\n for elem in result.comps.keys():\n parname = 'amp_%s' % elem.lower()\n if parname in result.params:\n par = result.params[parname]\n conc_vals[elem] = [par.value, par.stderr]\n\n scale = self.owids['comp_scalevalue'].GetValue()\n\n owids['comp_elemscale'].SetValue(conc_vals[cur_elem][0]/scale)\n owids['composition'].DeleteAllItems()\n result.concentration_results = conc_vals\n result.concentration_scale = scale\n\n for elem, dat in conc_vals.items():\n zat = \"%d\" % atomic_number(elem)\n val, serr = dat\n rval = \"%15.4f\" % val\n sval = \"%15.4f\" % (val/scale)\n uval = \"%15.4f\" % (serr/scale)\n try:\n uval = uval + ' ({:.2%})'.format(abs(serr/val))\n except ZeroDivisionError:\n pass\n owids['composition'].AppendItem((zat, elem, rval, sval, uval))\n\n def onCompSetElemAbundance(self, event=None, value=None):\n if len(self.fit_history) < 1 or (time.time() - self.compscale_lock) < 0.25:\n return\n self.compscale_lock = time.time()\n owids = self.owids\n result = self.get_fitresult(nfit=owids['comp_fitlabel'].GetSelection())\n cur_elem = owids['comp_elemchoice'].GetStringSelection()\n conc_vals = {}\n for elem in result.comps.keys():\n parname = 'amp_%s' % elem.lower()\n if parname in result.params:\n par = result.params[parname]\n conc_vals[elem] = [par.value, par.stderr]\n\n result.concentration_results = conc_vals\n\n scale = conc_vals[cur_elem][0]/owids['comp_elemscale'].GetValue()\n result.concentration_scale = scale\n 
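# write the derived scale back to the widget and rebuild the concentration table\n 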
owids['comp_scalevalue'].SetValue(scale)\n owids['composition'].DeleteAllItems()\n for elem, dat in conc_vals.items():\n zat = \"%d\" % atomic_number(elem)\n val, serr = dat\n rval = \"%15.4f\" % val\n sval = \"%15.4f\" % (val/scale)\n uval = \"%15.4f\" % (serr/scale)\n try:\n uval = uval + ' ({:.2%})'.format(abs(serr/val))\n except ZeroDivisionError:\n pass\n owids['composition'].AppendItem((zat, elem, rval, sval, uval))\n\n\n def onCompSave(self, event=None):\n result = self.get_fitresult(nfit=self.owids['comp_fitlabel'].GetSelection())\n scale = result.concentration_scale\n deffile = self.mca.filename + '_' + result.label\n deffile = fix_filename(deffile.replace('.', '_')) + '_xrf.csv'\n wcards = \"CSV (*.csv)|*.csv|All files (*.*)|*.*\"\n sfile = FileSave(self, 'Save Concentration Results',\n default_file=deffile,\n wildcard=wcards)\n if sfile is not None:\n buff = [\"# results for file: %s\" % self.mca.filename,\n \"# fit label: %s\" % result.label,\n \"# concentration units: %s\" % self.owids['comp_units'].GetStringSelection(),\n \"# count time: %s\" % result.count_time,\n \"# scale: %s\" % result.concentration_scale,\n \"# Fit Report:\" ]\n for l in result.fit_report.split('\\n'):\n buff.append(\"# %s\" % l)\n buff.append(\"###########\")\n buff.append(\"#Element Concentration Uncertainty RawAmplitude\")\n for elem, dat in result.concentration_results.items():\n eout = (elem + ' '*4)[:4]\n val, serr = dat\n rval = \"%16.5f\" % val\n sval = \"%16.5f\" % (val/scale)\n uval = \"%16.5f\" % (serr/scale)\n buff.append(\" \".join([eout, sval, uval, rval]))\n buff.append('')\n with open(sfile, 'w') as fh:\n fh.write('\\n'.join(buff))\n\n\n def onCompSelectFit(self, event=None):\n result = self.get_fitresult(nfit=self.owids['comp_fitlabel'].GetSelection())\n cur_elem = self.owids['comp_elemchoice'].GetStringSelection()\n self.owids['comp_elemchoice'].Clear()\n elems = [el['symbol'] for el in result.elements]\n self.owids['comp_elemchoice'].SetChoices(elems)\n if len(cur_elem) > 0:\n self.owids['comp_elemchoice'].SetStringSelection(cur_elem)\n else:\n self.owids['comp_elemchoice'].SetSelection(0)\n self.onCompSetElemAbundance()\n\n def UpdateCompositionPage(self, event=None):\n self.fit_history = getattr(self.mca, 'fit_history', [])\n if len(self.fit_history) > 0:\n result = self.get_fitresult()\n fitlab = self.owids['comp_fitlabel']\n fitlab.Clear()\n fitlab.SetChoices([a.label for a in self.fit_history])\n fitlab.SetStringSelection(result.label)\n self.onCompSelectFit()\n\n def onElems_Clear(self, event=None):\n self.ptable.on_clear_all()\n\n def onElems_GuessPeaks(self, event=None):\n mca = self.mca\n _indices = peak.indexes(mca.counts, min_dist=5, thres=0.025)\n peak_energies = mca.energy[_indices]\n\n elrange = range(10, 92)\n atsyms = [atomic_symbol(i) for i in elrange]\n kalphas = [0.001*xray_line(i, 'Ka').energy for i in elrange]\n kbetas = [0.001*xray_line(i, 'Kb').energy for i in elrange]\n\n self.ptable.on_clear_all()\n elems = []\n for iz, en in enumerate(peak_energies):\n for i, ex in enumerate(kalphas):\n if abs(en - ex) < 0.025:\n elems.append(atsyms[i])\n peak_energies[iz] = -ex\n\n for iz, en in enumerate(peak_energies):\n if en > 0:\n for i, ex in enumerate(kbetas):\n if abs(en - ex) < 0.025:\n if atsyms[i] not in elems:\n elems.append(atsyms[i])\n peak_energies[iz] = -ex\n\n en = self.wids['en_xray'].GetValue()\n emin = self.wids['en_min'].GetValue()\n for elem in elems:\n kedge = 0.001*xray_edge(elem, 'K').energy\n l3edge = 0.001*xray_edge(elem, 'L3').energy\n l2edge = 
0.001*xray_edge(elem, 'L2').energy\n if ((kedge < en and kedge > emin) or\n (l3edge < en and l3edge > emin) or\n (l2edge < en and l2edge > emin)):\n if elem not in self.ptable.selected:\n self.ptable.onclick(label=elem)\n\n def onElems_FromROIS(self, event=None):\n for roi in self.mca.rois:\n words = roi.name.split()\n elem = words[0].title()\n if (elem in self.ptable.syms and\n elem not in self.ptable.selected):\n self.ptable.onclick(label=elem)\n self.onSetXrayEnergy()\n\n def onSetXrayEnergy(self, event=None):\n en = self.wids['en_xray'].GetValue()\n self.wids['en_max'].SetValue(en)\n self.wids['elastic_cen'].SetValue(en)\n self.wids['compton1_cen'].SetValue(en*0.975)\n self.wids['compton2_cen'].SetValue(en*0.950)\n emin = self.wids['en_min'].GetValue() * 1.25\n\n self.ptable.on_clear_all()\n for roi in self.mca.rois:\n words = roi.name.split()\n elem = words[0].title()\n kedge = l3edge = l2edge = 0.0\n try:\n kedge = 0.001*xray_edge(elem, 'K').energy\n l3edge = 0.001*xray_edge(elem, 'L3').energy\n l2edge = 0.001*xray_edge(elem, 'L2').energy\n except Exception:\n pass\n if ((kedge < en and kedge > emin) or\n (l3edge < en and l3edge > emin) or\n (l2edge < en and l2edge > emin)):\n if elem not in self.ptable.selected:\n self.ptable.onclick(label=elem)\n\n def onShowBgr(self, event=None):\n mca = self.mca\n parent = self.parent\n width = self.wids['bgr_width'].GetValue()\n expon = int(self.wids['bgr_expon'].GetStringSelection())\n\n xrf_background(energy=mca.energy, counts=mca.counts, group=mca,\n width=width, exponent=expon, _larch=parent.larch)\n\n mca.bgr_width = width\n mca.bgr_exponent = expon\n parent.plotmca(mca)\n parent.oplot(mca.energy, mca.bgr, label='background',\n color=parent.conf.bgr_color, linewidth=2, style='--')\n\n def onDetMaterial(self, event=None):\n dmat = self.wids['det_mat'].GetStringSelection()\n if dmat not in FanoFactors:\n dmat = 'Si'\n self.wids['det_efano'].SetLabel('E_Fano= %.4e' % FanoFactors[dmat])\n\n def onFilterMaterial(self, evt=None, index=1):\n name = evt.GetString()\n den = self.materials_data.get(name, (None, 1.0))[1]\n t = 'filter%d' % (index)\n thick = self.wids['%s_thk'%t]\n if den < 0.1 and thick.GetValue() < 0.1:\n thick.SetValue(10.0)\n thick.SetIncrement(0.5)\n elif den > 0.1 and thick.GetValue() < 1.e-5:\n thick.SetValue(0.0250)\n thick.SetIncrement(0.005)\n\n def onUseCurrentMaterialAsFilter(self, evt=None, layer=1):\n name = self.selected_material\n density = self.materials_data.get(name, (None, 1.0))[1]\n if layer is not None and len(name)>0:\n self.wids['matrix%d_den'% layer].SetValue(density)\n self.wids['matrix%d_mat'% layer].SetValue(name)\n\n def onSelectMaterial(self, evt=None):\n if self.owids['materials'] is None:\n return\n item = self.owids['materials'].GetSelectedRow()\n name = None\n if item > -1:\n name = list(self.materials_data.keys())[item]\n self.selected_material = name\n\n for i in range(NMATRIX):\n t = 'matrix%d' % (i+1)\n self.wids['%s_btn'%t].Enable(name is not None)\n if name is not None:\n self.wids['%s_btn'%t].SetLabel('Use %s' % name)\n\n def onUpdateFilterList(self, evt=None):\n flist = ['None']\n for i in range(len(self.materials_data)):\n if self.owids['materials'].GetToggleValue(i, 3): # is filter\n flist.append(self.owids['materials'].GetTextValue(i, 0))\n\n for i in range(NFILTERS):\n t = 'filter%d' % (i+1)\n choice = self.wids['%s_mat'%t]\n cur = choice.GetStringSelection()\n choice.Clear()\n choice.SetChoices(flist)\n if cur in flist:\n choice.SetStringSelection(cur)\n else:\n choice.SetSelection(0)\n\n def 
onAddMaterial(self, evt=None):\n name = self.owids['newmat_name'].GetValue()\n formula = self.owids['newmat_form'].GetValue()\n density = self.owids['newmat_dens'].GetValue()\n add = len(name) > 0 and len(formula)>0\n if add and name in self.materials_data:\n add = (Popup(self,\n \"Overwrite definition of '%s'?\" % name,\n 'Re-define material?',\n style=wx.OK|wx.CANCEL)==wx.ID_OK)\n if add:\n irow = list(self.materials_data.keys()).index(name)\n self.owids['materials'].DeleteItem(irow)\n if add:\n add_material(name, formula, density)\n self.materials_data[name] = (formula, density)\n self.selected_material = name\n self.owids['materials'].AppendItem((name, formula,\n \"%9.6f\"%density,\n False))\n\n def onElemSelect(self, event=None, elem=None):\n self.ptable.tsym.SetLabel('')\n self.ptable.title.SetLabel('%d elements selected' %\n len(self.ptable.selected))\n\n def onUseBackground(self, event=None):\n use_bgr = self.wids['bgr_use'].IsChecked()\n self.wids['bgr_width'].Enable(use_bgr)\n self.wids['bgr_expon'].Enable(use_bgr)\n self.wids['bgr_show'].Enable(use_bgr)\n\n def onUsePileupEscape(self, event=None):\n puse = self.wids['pileup_use'].IsChecked()\n self.wids['pileup_amp'].Enable(puse)\n self.wids['pileup_amp_vary'].Enable(puse)\n\n puse = self.wids['escape_use'].IsChecked()\n self.wids['escape_amp'].Enable(puse)\n self.wids['escape_amp_vary'].Enable(puse)\n\n\n def onUsePeak(self, event=None, name=None, value=None):\n if value is None and event is not None:\n value = event.IsChecked()\n if name is None:\n return\n for a in ('cen', 'step', 'tail', 'sigma', 'beta'):\n self.wids['%s_%s'%(name, a)].Enable(value)\n varwid = self.wids.get('%s_%s_vary'%(name, a), None)\n if varwid is not None:\n varwid.Enable(value)\n\n def build_model(self, match_amplitudes=True):\n \"\"\"build xrf_model from form settings\"\"\"\n vars = {'Vary':'True', 'Fix': 'False', 'True':True, 'False': False}\n opts = {}\n for key, wid in self.wids.items():\n if hasattr(wid, 'GetValue'):\n val = wid.GetValue()\n elif hasattr(wid, 'IsChecked'):\n val = wid.IsChecked()\n elif isinstance(wid, Choice):\n val = wid.GetStringSelection()\n elif hasattr(wid, 'GetStringSelection'):\n val = wid.GetStringSelection()\n else:\n val = '????' # widget type not recognized; keep a placeholder value\n if isinstance(val, str) and val.title() in vars:\n val = vars[val.title()]\n opts[key] = val\n opts['count_time'] = getattr(self.mca, 'real_time', 1.0)\n if opts['count_time'] is None:\n opts['count_time'] = 1.0\n\n script = [xrfmod_setup.format(**opts)]\n\n for peak in ('Elastic', 'Compton1', 'Compton2'):\n t = peak.lower()\n if opts['%s_use'% t]:\n d = {'peakname': t}\n d['_cen'] = opts['%s_cen'%t]\n d['vcen'] = opts['%s_cen_vary'%t]\n d['_step'] = opts['%s_step'%t]\n d['vstep'] = opts['%s_step_vary'%t]\n d['_tail'] = opts['%s_tail'%t]\n d['vtail'] = opts['%s_tail_vary'%t]\n d['_beta'] = opts['%s_beta'%t]\n d['vbeta'] = opts['%s_beta_vary'%t]\n d['_sigma'] = opts['%s_sigma'%t]\n d['vsigma'] = opts['%s_sigma_vary'%t]\n script.append(xrfmod_scattpeak.format(**d))\n\n for i in range(NFILTERS):\n t = 'filter%d' % (i+1)\n f_mat = opts['%s_mat'%t]\n if f_mat not in (None, 'None') and int(1e6*opts['%s_thk'%t]) > 1:\n script.append(xrfmod_filter.format(name=f_mat,\n thick=opts['%s_thk'%t],\n vary=opts['%s_var'%t]))\n\n for i in range(NMATRIX):\n t = 'matrix%d' % (i+1)\n m_mat = opts['%s_mat'%t].strip()\n if len(m_mat) > 0 and int(1e6*opts['%s_thk'%t]) > 1:\n script.append(xrfmod_matrix.format(name=m_mat,\n thick=opts['%s_thk'%t],\n density=opts['%s_den'%t]))\n\n if opts.get('bgr_use', False) 
in ('True', True):\n bwid = self.wids['bgr_width'].GetValue()/1000.0\n bexp = int(self.wids['bgr_expon'].GetStringSelection())\n script.append(xrfmod_bgr.format(group=self.mcagroup, bgr_wid=bwid, bgr_exp=bexp))\n\n if opts['pileup_use'] in ('True', True):\n script.append(xrfmod_pileup.format(scale=opts['pileup_amp'],\n vary=opts['pileup_amp_vary']))\n\n if opts['escape_use'] in ('True', True):\n script.append(xrfmod_escape.format(scale=opts['escape_amp'],\n vary=opts['escape_amp_vary']))\n\n # sort elements selected on Periodic Table by Z\n elemz = []\n for elem in self.ptable.selected:\n elemz.append(1 + self.ptable.syms.index(elem))\n elemz.sort()\n syms = [\"'%s'\" % self.ptable.syms[iz-1] for iz in sorted(elemz)]\n syms = '[%s]' % (', '.join(syms))\n script.append(xrfmod_elems.format(elemlist=syms))\n script.append(\"{group:s}.xrf_init = _xrfmodel.calc_spectrum({group:s}.energy)\")\n script = '\\n'.join(script)\n self.model_script = script.format(group=self.mcagroup)\n\n self._larch.eval(self.model_script)\n\n cmds = []\n self.xrfmod = self._larch.symtable.get_symbol('_xrfmodel')\n floor = 1.e-12*max(self.mca.counts)\n if match_amplitudes:\n total = 0.0 * self.mca.counts\n for name, parr in self.xrfmod.comps.items():\n nam = name.lower()\n try:\n imax = np.where(parr > 0.99*parr.max())[0][0]\n except IndexError: # probably means all counts are zero\n imax = int(len(parr)/2.0)\n scale = self.mca.counts[imax] / (parr[imax]+1.00)\n ampname = 'amp_%s' % nam\n if nam in ('elastic', 'compton1', 'compton2', 'compton',\n 'background', 'pileup', 'escape'):\n ampname = '%s_amp' % nam\n if nam in ('background', 'pileup', 'escape'):\n scale = 1.0\n paramval = self.xrfmod.params[ampname].value\n s = \"_xrfmodel.params['%s'].value = %.5f\" % (ampname, paramval*scale)\n cmds.append(s)\n parr *= scale\n parr[np.where(parr < floor)] = floor\n total += parr\n self._larch.eval('\\n'.join(cmds))\n\n def get_fitresult(self, nfit=None):\n if nfit is None:\n nfit = self.nfit\n self.fit_history = getattr(self.mca, 'fit_history', [])\n self.nfit = nfit\n if self.nfit > len(self.fit_history):\n self.nfit = 0\n return self.fit_history[self.nfit]\n\n def onChangeFitLabel(self, event=None):\n label = self.owids['fitlabel_txt'].GetValue()\n result = self.get_fitresult()\n result.label = label\n self.show_results()\n\n def onPlot(self, event=None):\n result = self.get_fitresult()\n xrfmod = self._larch.symtable.get_symbol('_xrfmodel')\n with_comps = self.owids['plot_comps'].IsChecked()\n spect = xrfmod.calc_spectrum(self.mca.energy,\n params=result.params)\n self.plot_model(model_spectrum=spect, with_comps=with_comps,\n label=result.label)\n\n def onSelectFit(self, evt=None):\n if self.owids['stats'] is None:\n return\n item = self.owids['stats'].GetSelectedRow()\n if item > -1:\n self.show_fitresult(nfit=item)\n\n def onSelectParameter(self, evt=None):\n if self.owids['params'] is None:\n return\n if not self.owids['params'].HasSelection():\n return\n item = self.owids['params'].GetSelectedRow()\n pname = self.owids['paramsdata'][item]\n\n cormin = self.owids['min_correl'].GetValue()\n self.owids['correl'].DeleteAllItems()\n\n result = self.get_fitresult()\n this = result.params[pname]\n if this.correl is not None:\n sort_correl = sorted(this.correl.items(), key=lambda it: abs(it[1]))\n for name, corval in reversed(sort_correl):\n if abs(corval) > cormin:\n self.owids['correl'].AppendItem((pname, name, \"% .4f\" % corval))\n\n def onAllCorrel(self, evt=None):\n result = self.get_fitresult()\n params = result.params\n parnames = list(params.keys())\n\n cormin = self.owids['min_correl'].GetValue()\n correls = {}\n for i, name in enumerate(parnames):\n par = params[name]\n if not par.vary:\n continue\n if hasattr(par, 'correl') and par.correl is not None:\n for name2 in 
parnames[i+1:]:\n if (name != name2 and name2 in par.correl and\n abs(par.correl[name2]) > cormin):\n correls[\"%s$$%s\" % (name, name2)] = par.correl[name2]\n\n sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))\n sort_correl.reverse()\n\n self.owids['correl'].DeleteAllItems()\n\n for namepair, corval in sort_correl:\n name1, name2 = namepair.split('$$')\n self.owids['correl'].AppendItem((name1, name2, \"% .4f\" % corval))\n\n def show_results(self):\n cur = self.get_fitresult()\n self.owids['stats'].DeleteAllItems()\n for i, res in enumerate(self.fit_history):\n args = [res.label]\n for attr in ('nvarys', 'nfev', 'chisqr', 'redchi', 'aic'):\n val = getattr(res, attr)\n if isinstance(val, int):\n val = '%d' % val\n else:\n val = gformat(val, 11)\n args.append(val)\n self.owids['stats'].AppendItem(tuple(args))\n self.owids['data_title'].SetLabel(\"%s: %.3f sec\" % (self.mca.filename, cur.count_time))\n self.owids['data_title2'].SetLabel(\"%s: %.3f sec\" % (self.mca.filename, cur.count_time))\n self.owids['fitlabel_txt'].SetValue(cur.label)\n self.show_fitresult(nfit=self.nfit)\n\n def show_fitresult(self, nfit=0, mca=None):\n if mca is not None:\n self.mca = mca\n result = self.get_fitresult(nfit=nfit)\n\n self.owids['data_title'].SetLabel(\"%s: %.3f sec\" % (self.mca.filename, result.count_time))\n self.owids['data_title2'].SetLabel(\"%s: %.3f sec\" % (self.mca.filename, result.count_time))\n self.result = result\n self.owids['fitlabel_txt'].SetValue(result.label)\n self.owids['params'].DeleteAllItems()\n self.owids['paramsdata'] = []\n for param in reversed(result.params.values()):\n pname = param.name\n try:\n val = gformat(param.value, 10)\n except (TypeError, ValueError):\n val = ' ??? '\n\n serr, perr = ' N/A ', ' N/A '\n if param.stderr is not None:\n serr = gformat(param.stderr, 10)\n try:\n perr = ' {:.2%}'.format(abs(param.stderr/param.value))\n except ZeroDivisionError:\n perr = '?'\n extra = ' '\n if param.expr is not None:\n extra = ' = %s ' % param.expr\n elif not param.vary:\n extra = ' (fixed)'\n elif param.init_value is not None:\n extra = gformat(param.init_value, 10)\n\n self.owids['params'].AppendItem((pname, val, serr, perr, extra))\n self.owids['paramsdata'].append(pname)\n self.Refresh()\n", "sub_path": "larch/wxlib/xrfdisplay_fitpeaks.py", "file_name": "xrfdisplay_fitpeaks.py", "file_ext": "py", "file_size_in_byte": 62139, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "59", "api": [{"api_name": "wx.dataview.DV_SINGLE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "wx.dataview", "line_number": 18, "usage_type": "name"}, {"api_name": "wx.dataview.DV_VERT_RULES", "line_number": 18, "usage_type": "attribute"}, {"api_name": "wx.dataview.DV_ROW_LINES", "line_number": 18, "usage_type": "attribute"}, {"api_name": "xraydb.materials", "line_number": 47, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 48, "usage_type": "call"}, {"api_name": "xraydb.materials", "line_number": 51, "usage_type": "name"}, {"api_name": "xraydb.materials", "line_number": 52, "usage_type": "name"}, {"api_name": "wxutils.Choice", "line_number": 56, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 137, "usage_type": "attribute"}, {"api_name": "xrfdisplay_utils.XRFGROUP", "line_number": 145, "usage_type": "argument"}, {"api_name": "xrfdisplay_utils.XRFGROUP", "line_number": 148, "usage_type": "name"}, {"api_name": "wxutils.GUIColors", "line_number": 160, "usage_type": "call"}, 
{"api_name": "wx.Frame.__init__", "line_number": 161, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 161, "usage_type": "attribute"}, {"api_name": "wx.DEFAULT_FRAME_STYLE", "line_number": 162, "usage_type": "attribute"}, {"api_name": "notebooks.flatnotebook", "line_number": 174, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 177, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 177, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 178, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 178, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 180, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 182, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 182, "usage_type": "attribute"}, {"api_name": "wxutils.LEFT", "line_number": 184, "usage_type": "argument"}, {"api_name": "wxutils.Button", "line_number": 183, "usage_type": "call"}, {"api_name": "wxutils.LEFT", "line_number": 186, "usage_type": "argument"}, {"api_name": "wxutils.Button", "line_number": 185, "usage_type": "call"}, {"api_name": "wxutils.pack", "line_number": 188, "usage_type": "call"}, {"api_name": "wxutils.CEN", "line_number": 189, "usage_type": "argument"}, {"api_name": "wxutils.pack", "line_number": 191, "usage_type": "call"}, {"api_name": "wxutils.GridPanel", "line_number": 204, "usage_type": "call"}, {"api_name": "periodictable.PeriodicTablePanel", "line_number": 206, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 211, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 214, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 217, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 221, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 230, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 233, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 235, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 237, "usage_type": "call"}, {"api_name": "wxutils.HLine", "line_number": 268, "usage_type": "call"}, {"api_name": "wxutils.Check", "line_number": 281, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 288, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 290, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 293, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 296, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 299, "usage_type": "call"}, {"api_name": "wxutils.HLine", "line_number": 330, "usage_type": "call"}, {"api_name": "wxutils.GridPanel", "line_number": 351, "usage_type": "call"}, {"api_name": "wxutils.LEFT", "line_number": 351, "usage_type": "name"}, {"api_name": "wxutils.HLine", "line_number": 354, "usage_type": "call"}, {"api_name": "wxutils.Check", "line_number": 370, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 372, "usage_type": "call"}, {"api_name": "wxutils.Check", "line_number": 376, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 378, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 386, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 389, "usage_type": "call"}, {"api_name": "wxutils.Check", "line_number": 393, "usage_type": "call"}, {"api_name": "wxutils.Choice", "line_number": 395, "usage_type": "call"}, 
{"api_name": "wxutils.FloatSpin", "line_number": 399, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 407, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 409, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 410, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 413, "usage_type": "call"}, {"api_name": "wxutils.SimpleText", "line_number": 414, "usage_type": "call"}, {"api_name": "xrf.FanoFactors", "line_number": 415, "usage_type": "name"}, {"api_name": "wxutils.FloatSpin", "line_number": 418, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 419, "usage_type": "call"}, {"api_name": "wxutils.GridPanel", "line_number": 478, "usage_type": "call"}, {"api_name": "wxutils.LEFT", "line_number": 478, "usage_type": "name"}, {"api_name": "wxutils.CEN", "line_number": 482, "usage_type": "name"}, {"api_name": "wxutils.Choice", "line_number": 487, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 489, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 490, "usage_type": "call"}, {"api_name": "wxutils.HLine", "line_number": 510, "usage_type": "call"}, {"api_name": "wxutils.CEN", "line_number": 514, "usage_type": "name"}, {"api_name": "wx.TextCtrl", "line_number": 518, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 519, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 520, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 521, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 522, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 525, "usage_type": "name"}, {"api_name": "wxutils.HLine", "line_number": 531, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 535, "usage_type": "call"}, {"api_name": "wx.dataview.DataViewListCtrl", "line_number": 539, "usage_type": "call"}, {"api_name": "wx.dataview", "line_number": 539, "usage_type": "name"}, {"api_name": "wx.dataview.EVT_DATAVIEW_SELECTION_CHANGED", "line_number": 540, "usage_type": "attribute"}, {"api_name": "wx.dataview", "line_number": 540, "usage_type": "name"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 549, "usage_type": "attribute"}, {"api_name": "xraydb.materials._read_materials_db", "line_number": 556, "usage_type": "call"}, {"api_name": "xraydb.materials", "line_number": 556, "usage_type": "name"}, {"api_name": "wx.TextCtrl", "line_number": 564, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 565, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 566, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 575, "usage_type": "call"}, {"api_name": "wx.GridBagSizer", "line_number": 581, "usage_type": "call"}, {"api_name": "wx.lib.scrolledpanel.ScrolledPanel", "line_number": 582, "usage_type": "call"}, {"api_name": "wx.lib.scrolledpanel", "line_number": 582, "usage_type": "name"}, {"api_name": "wxutils.SimpleText", "line_number": 585, "usage_type": "call"}, {"api_name": "wxutils.Font", "line_number": 585, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 586, "usage_type": "name"}, {"api_name": "wxutils.SimpleText", "line_number": 588, "usage_type": "call"}, {"api_name": "wxutils.Font", "line_number": 588, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 589, "usage_type": "name"}, {"api_name": "wxutils.SimpleText", "line_number": 591, "usage_type": "call"}, {"api_name": 
"wx.TextCtrl", "line_number": 592, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 593, "usage_type": "call"}, {"api_name": "wxutils.Check", "line_number": 597, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 598, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 601, "usage_type": "call"}, {"api_name": "wxutils.SetTip", "line_number": 603, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 605, "usage_type": "call"}, {"api_name": "wxutils.SetTip", "line_number": 607, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 610, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 611, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 614, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 615, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 616, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 617, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 620, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 621, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 622, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 626, "usage_type": "argument"}, {"api_name": "wxutils.HLine", "line_number": 626, "usage_type": "call"}, {"api_name": "wxutils.SimpleText", "line_number": 629, "usage_type": "call"}, {"api_name": "wxutils.Font", "line_number": 629, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 630, "usage_type": "name"}, {"api_name": "wxutils.LCEN", "line_number": 631, "usage_type": "argument"}, {"api_name": "wx.dataview.DataViewListCtrl", "line_number": 633, "usage_type": "call"}, {"api_name": "wx.dataview", "line_number": 633, "usage_type": "name"}, {"api_name": "wx.dataview.EVT_DATAVIEW_SELECTION_CHANGED", "line_number": 634, "usage_type": "attribute"}, {"api_name": "wx.dataview", "line_number": 634, "usage_type": "name"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 644, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 646, "usage_type": "attribute"}, {"api_name": "wxutils.LCEN", "line_number": 652, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 655, "usage_type": "argument"}, {"api_name": "wxutils.HLine", "line_number": 655, "usage_type": "call"}, {"api_name": "wxutils.SimpleText", "line_number": 658, "usage_type": "call"}, {"api_name": "wxutils.Font", "line_number": 658, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 659, "usage_type": "name"}, {"api_name": "wxutils.LCEN", "line_number": 660, "usage_type": "argument"}, {"api_name": "wx.dataview.DataViewListCtrl", "line_number": 662, "usage_type": "call"}, {"api_name": "wx.dataview", "line_number": 662, "usage_type": "name"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 672, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 674, "usage_type": "attribute"}, {"api_name": "wx.dataview.EVT_DATAVIEW_SELECTION_CHANGED", "line_number": 679, "usage_type": "attribute"}, {"api_name": "wx.dataview", "line_number": 679, "usage_type": "name"}, {"api_name": "wxutils.LCEN", "line_number": 682, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 685, "usage_type": "argument"}, {"api_name": "wxutils.HLine", "line_number": 685, "usage_type": "call"}, {"api_name": "wxutils.SimpleText", "line_number": 688, "usage_type": "call"}, {"api_name": "wxutils.Font", 
"line_number": 688, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 689, "usage_type": "name"}, {"api_name": "wxutils.Button", "line_number": 691, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 694, "usage_type": "call"}, {"api_name": "wxutils.SimpleText", "line_number": 698, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 699, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 700, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 701, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 702, "usage_type": "argument"}, {"api_name": "wx.dataview.DataViewListCtrl", "line_number": 704, "usage_type": "call"}, {"api_name": "wx.dataview", "line_number": 704, "usage_type": "name"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 713, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 715, "usage_type": "attribute"}, {"api_name": "wxutils.LCEN", "line_number": 720, "usage_type": "argument"}, {"api_name": "wxutils.pack", "line_number": 722, "usage_type": "call"}, {"api_name": "wx.GridBagSizer", "line_number": 727, "usage_type": "call"}, {"api_name": "wx.lib.scrolledpanel.ScrolledPanel", "line_number": 728, "usage_type": "call"}, {"api_name": "wx.lib.scrolledpanel", "line_number": 728, "usage_type": "name"}, {"api_name": "wxutils.SimpleText", "line_number": 730, "usage_type": "call"}, {"api_name": "wxutils.Font", "line_number": 730, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 731, "usage_type": "name"}, {"api_name": "wxutils.SimpleText", "line_number": 732, "usage_type": "call"}, {"api_name": "wxutils.Font", "line_number": 732, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 733, "usage_type": "name"}, {"api_name": "wx.dataview.DataViewListCtrl", "line_number": 735, "usage_type": "call"}, {"api_name": "wx.dataview", "line_number": 735, "usage_type": "name"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 744, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 746, "usage_type": "attribute"}, {"api_name": "wxutils.Choice", "line_number": 751, "usage_type": "call"}, {"api_name": "wxutils.Choice", "line_number": 755, "usage_type": "call"}, {"api_name": "wxutils.FloatSpin", "line_number": 757, "usage_type": "call"}, {"api_name": "wxutils.Choice", "line_number": 760, "usage_type": "call"}, {"api_name": "wxutils.FloatCtrl", "line_number": 761, "usage_type": "call"}, {"api_name": "wxutils.Button", "line_number": 764, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 768, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 769, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 771, "usage_type": "argument"}, {"api_name": "wxutils.SimpleText", "line_number": 771, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 772, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 775, "usage_type": "argument"}, {"api_name": "wxutils.SimpleText", "line_number": 775, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 776, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 777, "usage_type": "argument"}, {"api_name": "wxutils.SimpleText", "line_number": 777, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 778, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 779, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 782, "usage_type": 
"argument"}, {"api_name": "wxutils.SimpleText", "line_number": 782, "usage_type": "call"}, {"api_name": "wxutils.LCEN", "line_number": 783, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 786, "usage_type": "argument"}, {"api_name": "wxutils.LCEN", "line_number": 789, "usage_type": "argument"}, {"api_name": "wxutils.pack", "line_number": 791, "usage_type": "call"}, {"api_name": "time.time", "line_number": 796, "usage_type": "call"}, {"api_name": "time.time", "line_number": 798, "usage_type": "call"}, {"api_name": "xraydb.atomic_number", "line_number": 817, "usage_type": "call"}, {"api_name": "time.time", "line_number": 829, "usage_type": "call"}, {"api_name": "time.time", "line_number": 831, "usage_type": "call"}, {"api_name": "xraydb.atomic_number", "line_number": 849, "usage_type": "call"}, {"api_name": "wxutils.fix_filename", "line_number": 865, "usage_type": "call"}, {"api_name": "wxutils.FileSave", "line_number": 867, "usage_type": "call"}, {"api_name": "peakutils.peak.indexes", "line_number": 920, "usage_type": "call"}, {"api_name": "peakutils.peak", "line_number": 920, "usage_type": "name"}, {"api_name": "xraydb.atomic_symbol", "line_number": 924, "usage_type": "call"}, {"api_name": "xraydb.xray_line", "line_number": 925, "usage_type": "call"}, {"api_name": "xraydb.xray_line", "line_number": 926, "usage_type": "call"}, {"api_name": "xraydb.xray_edge", "line_number": 947, "usage_type": "call"}, {"api_name": "xraydb.xray_edge", "line_number": 948, "usage_type": "call"}, {"api_name": "xraydb.xray_edge", "line_number": 949, "usage_type": "call"}, {"api_name": "xraydb.xray_edge", "line_number": 979, "usage_type": "call"}, {"api_name": "xraydb.xray_edge", "line_number": 980, "usage_type": "call"}, {"api_name": "xraydb.xray_edge", "line_number": 981, "usage_type": "call"}, {"api_name": "xrf.xrf_background", "line_number": 996, "usage_type": "call"}, {"api_name": "xrf.FanoFactors", "line_number": 1007, "usage_type": "name"}, {"api_name": "xrf.FanoFactors", "line_number": 1009, "usage_type": "name"}, {"api_name": "wxutils.Popup", "line_number": 1068, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 1071, "usage_type": "attribute"}, {"api_name": "wx.CANCEL", "line_number": 1071, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 1071, "usage_type": "attribute"}, {"api_name": "xraydb.add_material", "line_number": 1076, "usage_type": "call"}, {"api_name": "wxutils.Choice", "line_number": 1124, "usage_type": "argument"}, {"api_name": "peakutils.peak", "line_number": 1139, "usage_type": "name"}, {"api_name": "peakutils.peak.lower", "line_number": 1140, "usage_type": "call"}, {"api_name": "peakutils.peak", "line_number": 1140, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 1206, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1220, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1292, "usage_type": "call"}, {"api_name": "wxutils.fix_filename", "line_number": 1301, "usage_type": "call"}, {"api_name": "wxutils.FileSave", "line_number": 1303, "usage_type": "call"}, {"api_name": "wxutils.fix_filename", "line_number": 1313, "usage_type": "call"}, {"api_name": "wxutils.FileSave", "line_number": 1315, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1337, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1338, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1339, "usage_type": "call"}, {"api_name": 
"lmfit.printfuncs.gformat", "line_number": 1340, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1341, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1342, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1343, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1345, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1437, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1459, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1465, "usage_type": "call"}, {"api_name": "lmfit.printfuncs.gformat", "line_number": 1476, "usage_type": "call"}]}