diff --git "a/1231.jsonl" "b/1231.jsonl" new file mode 100644--- /dev/null +++ "b/1231.jsonl" @@ -0,0 +1,218 @@ +{"seq_id": "7031875178", "text": "import eel\nimport serial\neel.init('web')\n\nport = \"COM6\"\ncereal = serial.Serial(port, 9600, timeout=0)\n\ndef readMhorcel():\n\t#this is the function to handle the serial thread\n\tglobal cereal\n\twhile True:\n\t\tprint('In readMhorcel-thread')\n\t\tdata = cereal.read(256).decode('ascii')\n\t\tprint('after read')\n\t\tif len(data) > 0:\n\t\t\tprint(data)\n\t\t\teel.renderData(data)\n\t\teel.sleep(5)\n\n@eel.expose\ndef sendMhorcel(data):\n\tglobal cereal\n\ttry:\n\t\tcereal.write(data.encode())\n\texcept Exception as e:\n\t\tprint(e)\n\n\n\noptions = {\n 'host': 'localhost',\n 'port': 8080,\n 'block': False\n}\n\neel.start('main.html', options, block=False)\neel.spawn(readMhorcel)\n\nwhile True:\n\tprint('in main thread')\n\teel.sleep(2)\n", "repo_name": "MhoSciences/MhoGooey", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "eel.init", "line_number": 3, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 6, "usage_type": "call"}, {"api_name": "eel.renderData", "line_number": 17, "usage_type": "call"}, {"api_name": "eel.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 20, "usage_type": "attribute"}, {"api_name": "eel.start", "line_number": 36, "usage_type": "call"}, {"api_name": "eel.spawn", "line_number": 37, "usage_type": "call"}, {"api_name": "eel.sleep", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "34876406772", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('hello', '0004_person_image'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Publication',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('year', models.IntegerField()),\n ('title', models.CharField(max_length=100)),\n ('publisher', models.CharField(max_length=100)),\n ],\n ),\n ]\n", "repo_name": "Dilik/Responsive_Web_Development", "sub_path": "Web development training/djdemo/hello/migrations/0005_publication.py", "file_name": "0005_publication.py", "file_ext": "py", "file_size_in_byte": 642, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "73546122238", "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\nVERSION = '0.4.3'\n\nsetup(\n name='easybio', # package name\n version=VERSION, # package version\n author='Lei Cui',\n author_email='cuilei798@qq.com',\n maintainer='Lei Cui',\n maintainer_email='cuilei798@qq.com',\n license='MIT License',\n platforms=[\"linux\"],\n url='https://github.com/xleizi/easyBio_conda',\n\n description='The purpose of the creation of this package is to make bioinformatics analysis simpler.',\n long_description=open('README.md').read(),\n packages=find_packages(),\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'easyBio=easyBio.easyBio:main',\n 'easydownloadSRA=easyBio.downloadSRA:main',\n 'easysplitSRA=easyBio.splitSRA:main',\n 'easychangeSRAName=easyBio.changeSRAName:main',\n 'easycellranger=easyBio.run_cellranger:main',\n 'easyscGEOpipline=easyBio.pipline:main', \n 'easyVelocyto=easyBio.runvelocyto:main',\n 'easyscGSEpipline=easyBio.gsaPipline:main',\n 'easyInstallConda=easyBio.installConda:main',\n ]\n },\n install_requires=[\n # 'biopython',\n 'threadpool',\n 'requests',\n 'pandas'\n # 'velocyto',\n # 'psutil',\n # 'Cython'\n # Add more dependencies here\n ],\n package_data={\n 'Utils': ['Utils/*']\n },\n classifiers=[\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3'\n ],\n python_requires='>=3',\n)\n", "repo_name": "xleizi/easyBio_conda", "sub_path": "easybio_conda/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1620, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "97", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "23289791291", "text": "#!/usr/bin/env python\nimport sys, os, torch, h5py\nfrom transformers import BertModel, BertTokenizer\nfrom nlptools.utils import zdump\n\nmodel_path = sys.argv[1] if len(sys.argv) > 1 else \".\"\nmodel_name = \"bert-base-uncased\"\nvocab_name = os.path.join(model_path, 'vocab')\nweight_path = os.path.join(model_path, '{}.h5py'.format(model_name))\nword2idx_path = os.path.join(model_path, '{}.lookup'.format(model_name))\n\nmodel = BertModel.from_pretrained(model_name)\n\nweights = model.embeddings.word_embeddings.weight.detach().numpy()\n\ntokenizer = BertTokenizer.from_pretrained(model_name)\nword2idx = tokenizer.vocab\n\nprint(weights.shape)\nprint(len(tokenizer.vocab))\n\nif os.path.exists(weight_path):\n os.remove(weight_path)\n\nwith h5py.File(weight_path, 'w') as h5file:\n h5file.create_dataset(\"word2vec\", data=weights)\n\nzdump(word2idx, word2idx_path)\n\n\n", "repo_name": "zhupengjia/simple_kbqa", "sub_path": "example/bert_to_wordvec.py", "file_name": "bert_to_wordvec.py", "file_ext": "py", "file_size_in_byte": 849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "transformers.BertModel.from_pretrained", "line_number": 12, "usage_type": "call"}, {"api_name": "transformers.BertModel", "line_number": 12, "usage_type": "name"}, {"api_name": "transformers.BertTokenizer.from_pretrained", "line_number": 16, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 23, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 25, "usage_type": "call"}, {"api_name": "nlptools.utils.zdump", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "8035637881", "text": "import argparse\nimport os\nimport sys\nsys.path.append(\".\")\nfrom datetime import datetime\nfrom orientation.dataset import Dataset\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--method\", type=str,default='mlp', choices=[\n \"mlp\", # ml-based MlpClassifier\n ])\n parser.add_argument(\"--subset\", type=str, choices=[\n \"all\", \"train\", \"test\"\n ], default=\"train\")\n parser.add_argument(\"--work_dir\", type=str,\n default=os.path.join(\"workdir\", datetime.now().strftime(r\"%Y%m%d%H%M%S\")))\n return parser.parse_args()\n\n\ndef get_hook(args):\n if args.method == \"mlp\":\n from orientation.parse.ml_based import MlpClassifier\n mlp = MlpClassifier(args)\n return mlp.train\n raise NotImplementedError(args.method)\n\n\ndef get_dataset(args):\n return Dataset(args.subset)\n\n\ndef main():\n args = get_args()\n print(args)\n hook = get_hook(args)\n os.makedirs(args.work_dir, exist_ok=True)\n dataset = get_dataset(args)\n hook(dataset)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "ImNotRoy/orientation", "sub_path": "tools/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1060, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "orientation.parse.ml_based.MlpClassifier", "line_number": 25, "usage_type": "call"}, {"api_name": "orientation.dataset.Dataset", "line_number": 31, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "39420615740", "text": "from __future__ import annotations\n\nfrom dazl import Party\nfrom dazl.client.api import AIOPartyClient, SimplePartyClient\n\n\ndef test_api_consistency():\n from dazl.client._network_client_impl import _NetworkImpl\n from dazl.client._party_client_impl import _PartyClientImpl\n\n impl = _PartyClientImpl(_NetworkImpl(), Party(\"party\"))\n\n apc = AIOPartyClient(impl)\n tpc = SimplePartyClient(impl)\n\n callable_apcs = {\n key: getattr(apc, key)\n for key in dir(apc)\n if not key.startswith(\"_\") and callable(getattr(apc, key))\n }\n callable_tpcs = {\n key: getattr(tpc, key)\n for key in dir(tpc)\n if 
not key.startswith(\"_\") and callable(getattr(tpc, key))\n }\n\n assert sorted(set(callable_apcs.keys())) == sorted(set(callable_tpcs.keys()))\n", "repo_name": "digital-asset/dazl-client", "sub_path": "python/tests/unit/test_api_consistency.py", "file_name": "test_api_consistency.py", "file_ext": "py", "file_size_in_byte": 796, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "97", "api": [{"api_name": "dazl.client._party_client_impl._PartyClientImpl", "line_number": 11, "usage_type": "call"}, {"api_name": "dazl.client._network_client_impl._NetworkImpl", "line_number": 11, "usage_type": "call"}, {"api_name": "dazl.Party", "line_number": 11, "usage_type": "call"}, {"api_name": "dazl.client.api.AIOPartyClient", "line_number": 13, "usage_type": "call"}, {"api_name": "dazl.client.api.SimplePartyClient", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "41959383904", "text": "# coding:utf8\n\nfrom utils.timeutil import getCurrentYear, getCurrentQuarter\nfrom base.profit import getProfit\nfrom base.stock import getAStocks\n\n# 盈利在增长\ndef growFilter(stocks,years):\n currentYear = getCurrentYear()\n currentQuarter = getCurrentQuarter()\n\n lastQuarter = currentQuarter - 1 # 最近上个季度盈利数据\n\n if lastQuarter <= 0:\n lastQuarter = 4\n currentYear = currentYear -1\n\n print(\"use %s year %s quarter profit data\"%(currentYear,lastQuarter))\n\n for index,stock in stocks.iterrows():\n code = stock['symbol']\n area = \"sz\" if stock['ts_code'].find(\"SZ\") > 0 else 'sh'\n code = area + \".\" + code\n\n currentProfit = 0\n oldProfit = 0\n\n for quarter in range(lastQuarter,0,-1): # 最新盈利数据\n\n print(\"query %s %s year %s quarter profit:\"%(code,currentYear,quarter))\n profit = getProfit(code, currentYear, quarter)\n\n if profit is not None:\n try:\n currentProfit = float(profit['netProfit'])\n except Exception:\n currentProfit = 0 # 未找到对应数据\n else:\n currentProfit = 0\n\n if currentProfit != 0:\n break\n\n\n\n for quarter in range(lastQuarter, 0,-1): # 往期盈利数据\n\n print(\"query %s %s year %s quarter profit:\" % (code, currentYear - years, quarter))\n profit = getProfit(code, currentYear - years, quarter)\n\n if profit is not None:\n try:\n oldProfit = float(profit['netProfit'])\n except Exception:\n oldProfit = 0 # 未找到对应数据\n else:\n oldProfit = 0\n\n if oldProfit != 0:\n break\n\n\n print(\"%s current profit is: %s,old profit is: %s\"%(code,currentProfit,oldProfit))\n\n if currentProfit <= oldProfit:\n stocks.drop(index, inplace=True)\n\n return stocks\n\n\nif __name__ == \"__main__\":\n stocks = getAStocks('2020-03-23')\n\n growFilter(stocks,5)\n pass", "repo_name": "zzArche/quant", "sub_path": "filters/grow.py", "file_name": "grow.py", "file_ext": "py", "file_size_in_byte": 2111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "utils.timeutil.getCurrentYear", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.timeutil.getCurrentQuarter", "line_number": 10, "usage_type": "call"}, {"api_name": "base.profit.getProfit", "line_number": 31, "usage_type": "call"}, {"api_name": "base.profit.getProfit", "line_number": 49, "usage_type": "call"}, {"api_name": "base.stock.getAStocks", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "15333882220", "text": "# encoding: utf-8 #\r\n\"\"\"\r\n==========\r\nFile Name: 5 center \r\nAuthor: Oscar Fan\r\nDate: 2022/1/18\r\nrequirements: PyQt6\r\n==========\r\n\"\"\"\r\nimport sys\r\n\r\nfrom PyQt6.QtGui import 
QFont\r\nfrom PyQt6.QtWidgets import QApplication, QWidget\r\n\r\n\r\nclass Main(QWidget):\r\n\tdef __init__(self):\r\n\t\tsuper().__init__()\r\n\r\n\t\tself.initUI()\r\n\r\n\tdef initUI(self):\r\n\t\tself.setWindowTitle(\"5 Center\")\r\n\t\tself.setGeometry(300, 350, 400, 300)\r\n\t\tself.setToolTip(\"A window for file '5 center'\")\r\n\t\tself.setFont(QFont(\"SansSerif\", 12))\r\n\t\tself.center()\r\n\t\tself.show()\r\n\r\n\tdef center(self):\r\n\t\tqr = self.frameGeometry() # 一个矩形窗口可以放置所有类型的窗口\r\n\t\tcp = self.screen().availableGeometry().center() # 计算出分辨率和中心位置\r\n\r\n\t\tqr.moveCenter(cp) # 把窗口移到屏幕中心点位置\r\n\t\tself.move(qr.topLeft())\r\n\t\t# 把应用窗口的左上方的点移到矩形窗口的左上方,这样就可以居中显示了了\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp = QApplication(sys.argv)\r\n\tm = Main()\r\n\r\n\tsys.exit(app.exec())\r\n", "repo_name": "OscarFan26/python-a", "sub_path": "gui qt/learn/basic/5 center.py", "file_name": "5 center.py", "file_ext": "py", "file_size_in_byte": 1095, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "PyQt6.QtWidgets.QWidget", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt6.QtGui.QFont", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QApplication", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "11784949122", "text": "import os\nimport shutil\nimport sys\n\nimport pytest\n\ntry:\n import torch\n import numpy as np\n from mmcv import Config, ConfigDict\n\n # sys.path.append('/home/zhengchengyao/Document/Nerf/git/xrnerf')\n from xrnerf.models.builder import build_network\nexcept:\n pass\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(),\n reason='No GPU device has been found.')\ndef test_hasnerf_network():\n\n model_cfg = dict(\n type='HashNerfNetwork',\n cfg=dict(\n phase='train', # 'train' or 'test'\n chunk=2048, # mainly work for val\n bs_data='rays_o',\n ),\n mlp=dict( # coarse model\n type='HashNerfMLP',\n bound=1,\n embedder_pos=dict(n_input_dims=3,\n encoding_config=dict(\n otype='HashGrid',\n n_levels=16,\n n_features_per_level=2,\n log2_hashmap_size=19,\n base_resolution=16,\n interpolation='Linear',\n )),\n embedder_dir=dict(n_input_dims=3,\n encoding_config=dict(\n otype='SphericalHarmonics',\n degree=4,\n )),\n density_net=dict(n_input_dims=32,\n n_output_dims=16,\n network_config=dict(\n otype='FullyFusedMLP',\n activation='ReLU',\n output_activation='None',\n n_neurons=64,\n num_layers=1,\n )),\n color_net=dict(\n # n_input_dims=32, # embedder_dir's out + density_net's out\n n_output_dims=3,\n network_config=dict(\n otype='FullyFusedMLP',\n activation='ReLU',\n output_activation='None',\n n_neurons=64,\n num_layers=2,\n )),\n ),\n sampler=dict(\n type='NGPGridSampler',\n update_grid_freq=16,\n update_block_size=5000000,\n n_rays_per_batch=2048,\n cone_angle_constant=0.00390625,\n near_distance=0.2,\n target_batch_size=1 << 18,\n rgb_activation=2,\n density_activation=3,\n ),\n render=dict(\n type='HashNerfRender',\n bg_color=[0, 0, 0],\n ),\n )\n\n n_imgs = 10\n alldata = {\n 'aabb_scale': 1,\n 'aabb_range': (0, 1),\n 'images': np.random.rand(n_imgs, 800, 800, 4),\n 'poses': np.random.rand(n_imgs, 4, 3),\n 'focal': np.ones((n_imgs, 2), dtype=float) * 1110,\n 'metadata': np.random.rand(n_imgs, 11),\n }\n K = np.array([[1111, 0., 400.], [0., 1111, 400.], [0., 0., 1.]])\n datainfo = {\n 'H': 800,\n 'W': 800,\n 
'focal': 1111,\n 'K': K,\n 'hwf': [800, 800, 1111],\n 'near': 2.0,\n 'far': 6.0\n }\n model = build_network(ConfigDict(model_cfg))\n model.sampler.set_data(alldata, datainfo)\n model.cuda()\n\n data = {\n 'rays_o': torch.rand((2048, 3)).to(torch.float32),\n 'rays_d': torch.rand((2048, 3)).to(torch.float32),\n 'target_s': torch.rand((2048, 3)).to(torch.float32),\n 'alpha': torch.rand((2048, 1)).to(torch.float32),\n 'img_ids': torch.zeros((2048, 1)).to(torch.int32),\n 'bg_color': torch.rand((2048, 3)).to(torch.float32),\n }\n for k in data:\n data[k] = data[k].cuda().unsqueeze(0)\n\n ret = model.train_step(data, None)\n assert isinstance(ret['loss'], torch.Tensor)\n\n\n# test_hasnerf_network()\n", "repo_name": "openxrlab/xrnerf", "sub_path": "test/models/hashnerf/test_hashnerf_network.py", "file_name": "test_hashnerf_network.py", "file_ext": "py", "file_size_in_byte": 3917, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 529, "dataset": "github-code", "pt": "97", "api": [{"api_name": "numpy.random.rand", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "xrnerf.models.builder.build_network", "line_number": 102, "usage_type": "call"}, {"api_name": "mmcv.ConfigDict", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 108, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 110, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.int32", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipif", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "930296132", "text": "# -*- coding:utf-8 -*-\nfrom crawlMethods import baseCrawlMethod\nfrom utils import crawlUtils\nimport re\n\n\nclass cttCrawlMethod(baseCrawlMethod.crawlMethod):\n NAME = \"ctt\"\n DESCRIPTION = \"爬取创头条\"\n EXAMPLE_URL = \"http://www.ctoutiao.com/1937934.html\"\n USING = \"Soup\"\n EXTRACT_LATEST_RE = re.compile(\"target=\\\"_blank\\\" href=\\\"/(.+?)\\.html\\\"\")\n REQUIREMENT = {\n \"info\": {\n \"labels\": ['author', 'tag', 'title', 'summary',\n 'article'], # Implement 
here!\n \"isCrawlByIDAvailable\": True, # Implement here!\n \"isCrawlByTimeAvailable\": False, # Implement here!\n \"isCrawlByOrderAvailable\": True, # Implement here!\n }\n }\n\n @staticmethod\n def getLastestPostID():\n html = crawlUtils.crawlWorker(\"http://www.ctoutiao.com/\", \"Anon\", 0)['raw']\n return int(cttCrawlMethod.EXTRACT_LATEST_RE.findall(html)[0])\n\n @staticmethod\n def generateLinks(userParamObj):\n urlTemplate = \"http://www.ctoutiao.com/%s.html\"\n latestID = cttCrawlMethod.getLastestPostID()\n if userParamObj[\"crawlBy\"] == \"ORDER\":\n result = [\n urlTemplate % i\n for i in range(latestID - int(userParamObj[\"info\"][\"amount\"]), latestID)\n ]\n return result\n if userParamObj[\"crawlBy\"] == \"ID\":\n result = [urlTemplate % i for i in range(\n latestID - int(userParamObj[\"info\"][\"idRangeEnd\"]),\n latestID - int(userParamObj[\"info\"][\"idRangeStart\"]))\n ]\n return result\n return\n\n @staticmethod\n def generateRules(userParamObj):\n rulesObj = []\n\n if 'author' in userParamObj[\"info\"][\"requiredContent\"]:\n rulesObj.append({'name': 'author', 'rule': ['p', {'class': 'A_pon1'}, 0]})\n\n if 'tag' in userParamObj[\"info\"][\"requiredContent\"]:\n rulesObj.append({'name': 'tag', 'rule': ['div', {'class': 'A_linebn'}, 0]})\n\n if 'title' in userParamObj[\"info\"][\"requiredContent\"]:\n rulesObj.append({'name': 'title', 'rule': ['h1', {}, 0]})\n\n if 'summary' in userParamObj[\"info\"][\"requiredContent\"]:\n rulesObj.append({'name': 'summary', 'rule': ['div', {'class': 'A_zys'}, 0]})\n\n if 'article' in userParamObj[\"info\"][\"requiredContent\"]:\n rulesObj.append({'name': 'article', 'rule': ['div', {'class': 'A_contxt'}, 0]})\n\n return rulesObj\n\n @staticmethod\n def replaceSoup(soup):\n return soup\n", "repo_name": "crawlsh/crawlMethods", "sub_path": "cttCrawlMethod.py", "file_name": "cttCrawlMethod.py", "file_ext": "py", "file_size_in_byte": 2536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "crawlMethods.baseCrawlMethod.crawlMethod", "line_number": 7, "usage_type": "attribute"}, {"api_name": "crawlMethods.baseCrawlMethod", "line_number": 7, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.crawlUtils.crawlWorker", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.crawlUtils", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "42704403564", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport random\nfrom stem import Signal\nfrom stem.control import Controller\nimport requests\nimport tester\n\ndef load():\n driver = webdriver.Chrome('/Users/stefanwinter/Desktop/tmp/herzvoter/chromedriver')\n driver.get('https://www.shoepy.at/pages/startup-des-jahres-voting?ose=false#')\n\n driver.implicitly_wait(1)\n\n print(\"######\")\n print(\"Start Clicking...\")\n\n frame = driver.find_element_by_xpath('//*[@id=\"surveyhero-embed-7b323892\"]/iframe')\n driver.switch_to.frame(frame)\n\n #privacy = driver.find_element_by_xpath('//*[@id=\"shopify-privacy-banner-button-div\"]/button[1]')\n #accept = driver.find_element_by_xpath('//*[@id=\"kt-cookies-cookies_popup\"]/div/div[2]/a[2]')\n\n #privacy.click()\n #driver.implicitly_wait(1)\n #accept.click()\n driver.implicitly_wait(10)\n print(\"inside iFrame\")\n #print(driver.page_source)\n element = driver.find_element_by_xpath('//input[@value=\"9470786\"]')\n #element = 
driver.find_element_by_xpath('//*[@id=\"Q3792700\"]/div/div[2]/form/div/div[30]/label/div[1]/span/input')\n #element.click()\n driver.implicitly_wait(random.randint(4, 15))\n driver.execute_script(\"arguments[0].click();\", element)\n print(\"clicked!\")\n driver.implicitly_wait(1)\n\n #driver.switch_to_default_content()\n \n send = driver.find_element_by_xpath('//*[@id=\"nav-right\"]')\n driver.execute_script(\"arguments[0].click();\", send)\n print(\"sent\")\n driver.implicitly_wait(1)\n \ndef ip():\n proxies = {\n 'http': 'socks5://127.0.0.1:9050',\n 'https': 'socks5://127.0.0.1:9050'\n }\n with Controller.from_port(port = 9051) as c:\n c.authenticate()\n c.signal(Signal.NEWNYM)\n\n print(requests.get('https://api.ipify.org', proxies=proxies).text)\n\n\ndef exec():\n i = 0\n while i < 5:\n i = i+1\n ip()\n load()\n\nexec()", "repo_name": "StefanWinterToo/Fun-Projects", "sub_path": "herzvoter/clicker.py", "file_name": "clicker.py", "file_ext": "py", "file_size_in_byte": 1935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "stem.control.Controller.from_port", "line_number": 50, "usage_type": "call"}, {"api_name": "stem.control.Controller", "line_number": 50, "usage_type": "name"}, {"api_name": "stem.Signal.NEWNYM", "line_number": 52, "usage_type": "attribute"}, {"api_name": "stem.Signal", "line_number": 52, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "71167047998", "text": "from flask import Flask, render_template, request\nimport numpy as np\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.datasets import mnist\n\napp = Flask(__name__)\n\n# Load pre-trained model when the app starts.\nmodel = load_model('model.h5')\n\n# Load the MNIST dataset.\n(_, _), (x_test, y_test) = mnist.load_data()\nx_test = x_test.reshape(10000, 784).astype('float32') / 255.0\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n # Select a random test image and its label.\n i = np.random.randint(0, len(x_test))\n image, true_label = x_test[i], y_test[i]\n\n # Predict the digit in the image.\n pred_label = np.argmax(model.predict(image.reshape(1, -1)), axis=-1)\n\n return render_template('index.html', pred_label=pred_label[0], true_label=true_label)\n else:\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "tgk22c/test1", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 952, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.mnist.load_data", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.mnist", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 19, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "30455289555", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom onto.onto import Onto, first, Node, Link\nfrom enum import IntEnum\nimport math\nimport io\nimport struct\nimport re\n\n\nclass EonType(IntEnum):\n UINT8 = 0\n UINT16 = 1\n UINT32 = 2\n INT8 = 3\n INT16 = 4\n INT32 = 5\n FLOAT32 = 6\n STRING = 7\n\nclass Eon:\n def __init__(self, onto: Onto):\n self.onto = onto\n\n def get_number_of_ios(self, node: Node, type, ios):\n iosNodes = self.onto.get_typed_nodes_linked_from(node, \"has\", type)\n try:\n return iosNodes.index(ios)\n except ValueError:\n return None\n\n def get_io_op(self, ioInstID, type, dfdOnto: Onto):\n ioInst = dfdOnto.get_node_by_id(ioInstID)\n ioProto = first(dfdOnto.get_nodes_linked_from(ioInst, \"is_instance\"))\n ioMother = self.onto.get_node_by_id(ioProto.attributes[\"mother\"])\n ioMotherOp = first(self.onto.get_nodes_linked_to(ioMother, \"has\"))\n ioNumber = self.get_number_of_ios(ioMotherOp, type, ioMother)\n ioOpInst = first(dfdOnto.get_nodes_linked_to(ioInst, \"has\"))\n return ioOpInst, ioNumber\n\n def get_setting_by_name(self, node: Node, settingName):\n sNodes = self.onto.get_nodes_linked_from(node, \"has\")\n for sNode in sNodes:\n if sNode.name == settingName:\n return sNode\n return None\n\n def classify_int(self, intVal):\n if intVal < 0:\n if intVal < -32767:\n return EonType.INT32\n elif intVal < -127:\n return EonType.INT16\n else:\n return EonType.INT8\n else:\n if intVal > 65535:\n return EonType.UINT32\n elif intVal > 255:\n return EonType.UINT16\n else:\n return EonType.UINT8\n\n def guess_type(self, value):\n try:\n intVal = int(value)\n return self.classify_int(intVal), intVal\n except ValueError:\n try:\n floatVal = float(value)\n if floatVal.is_integer():\n intVal = int(floatVal)\n return self.classify_int(intVal), intVal\n else:\n return EonType.FLOAT32, floatVal\n except ValueError:\n if (len(value) == 0) or (value.startswith(\"'\") and value.endswith(\"'\")):\n return EonType.STRING, value\n else:\n raise ValueError(\"Cannot guess type of value <\" + value + \">\")\n\n def dump_value(self, value, type, buffer):\n if type == EonType.UINT8:\n buffer.write(struct.pack(\"!B\", value))\n elif type == EonType.UINT16:\n buffer.write(struct.pack(\"!H\", value))\n elif type == EonType.UINT32:\n buffer.write(struct.pack(\"!I\", value))\n elif type == EonType.INT8:\n buffer.write(struct.pack(\"!b\", value))\n elif type == EonType.INT16:\n buffer.write(struct.pack(\"!h\", value))\n elif type == EonType.INT32:\n buffer.write(struct.pack(\"!i\", value))\n elif type == EonType.FLOAT32:\n buffer.write(struct.pack(\"!f\", value))\n elif type == EonType.STRING:\n # String values are stored as null-terminated byte sequences.\n if len(value) > 0:\n buffer.write(value[1:-1].encode())\n buffer.write(bytes([0x0]))\n else:\n raise ValueError(\"Cannot dump value <\" + value + \"> of unknown type <\" + type + \">\")\n\n def get_eon(self, dfdOnto: Onto):\n dataFlowChunk = io.BytesIO()\n settingsChunk = io.BytesIO()\n keysChunk = io.BytesIO()\n dataFlowChunkLen = 0\n keys = {}\n \n for link in dfdOnto.links:\n if link.name == \"is_used\":\n dataFlowChunkLen += 1\n oOpInst, oNumber = self.get_io_op(link.source_node_id, \"Output\", dfdOnto)\n 
iOpInst, iNumber = self.get_io_op(link.destination_node_id, \"Input\", dfdOnto)\n dataFlowChunk.write(bytes([oOpInst.attributes[\"dfd\"], \\\n ((oNumber & 0x0F) << 4) | (iNumber & 0x0F), \\\n iOpInst.attributes[\"dfd\"]]))\n elif link.name == \"is_hosted\":\n opInst = dfdOnto.get_node_by_id(link.source_node_id)\n if not (\"settingsVal\" in opInst.attributes):\n continue\n settings = opInst.attributes[\"settingsVal\"]\n opProto = first(dfdOnto.get_nodes_linked_from(opInst, \"is_instance\"))\n opMother = self.onto.get_node_by_id(opProto.attributes[\"mother\"])\n for s in settings:\n sMother = self.get_setting_by_name(opMother, s)\n sNumber = self.get_number_of_ios(opMother, \"Setting\", sMother)\n if sNumber == None:\n continue\n sValue = settings[s]\n sType, convertedValue = self.guess_type(sValue)\n settingsChunk.write(bytes([opInst.attributes[\"dfd\"], \\\n ((sNumber & 0x0F) << 4) | (int(sType) & 0x0F)]))\n self.dump_value(convertedValue, sType, settingsChunk)\n if not (\"UID\" in opMother.attributes):\n raise ValueError(\"Operator <\" + opMother.name + \"> has no UID\")\n opMotherUID = opMother.attributes[\"UID\"]\n if opMotherUID in keys:\n keys[opMotherUID].append(opInst.attributes[\"dfd\"])\n else:\n keys[opMotherUID] = [opInst.attributes[\"dfd\"]]\n \n for k in keys:\n keysChunk.write(struct.pack(\"!H\", int(k) & 0xFFFF))\n keysChunk.write(bytes(keys[k]))\n keysChunk.write(bytes([0x0]))\n \n #\n # The EON 2.0 blob structure is as follows:\n # ----------------------------------------------------------------------------------\n # | DataFlowChunkLen | DataFlowChunk | SetingsChunkLen | SettingsChunk | KeysChunk |\n # ----------------------------------------------------------------------------------\n # where DataFlowChunkLen (1 byte) - number of 3-byte elements in DataFlowChunk\n # DataFlowChunk (3 * DataFlowChunkLen bytes) - chunk containing sequence of 'is_used' ontology links formed like this:\n # --------------------------------------\n # | OpInstA | Output | Input | OpInstB |\n # --------------------------------------\n # where OpInstA (1 byte), OpInstB (1 byte) - DFD IDs of the operators' instances, whereby output of OpInstA \n # is linked to the input of OpInstB\n # Output (4 bits) - number of output of OpInstA\n # Input (4 bits) - number of input of OpInstB\n # SettingsChunkLen (2 bytes) - length in bytes of SettingsChunk\n # SettingsChunk (SettingsChunkLen bytes) - chunk containing sequence of settings formed like this:\n # -----------------------------------\n # | OpInst | Setting | Type | Value |\n # -----------------------------------\n # where OpInst (1 byte) - DFD ID of operator's instance having the setting\n # Setting (4 bits) - number of setting of OpInst\n # Type (4 bits) - type ID of setting (see EonTypes)\n # Value - encoded setting value (length depends on type, strings are NULL-terminated)\n # KeysChunk (length is not stored) - chunk containing sequence of operators' instances formed like this:\n # --------------------------------------------\n # | MotherOp | OpInst1 | OpInst2 | ... | 0x0 |\n # --------------------------------------------\n # where MotherOp (2 bytes) - onto UID of operator (in mother ontology), which instances are used in DFD\n # OpInst1, OpInst2, ... 
(each 1 byte) - DFD IDs of MotherOp instances\n # 0x0 (1 byte) - zero byte terminating the list of MotherOp instances\n #\n result = io.BytesIO()\n if dataFlowChunkLen == 0:\n return result.getvalue(), dfdOnto\n result.write(bytes([dataFlowChunkLen]))\n result.write(dataFlowChunk.getbuffer())\n result.write(struct.pack(\"!H\", settingsChunk.getbuffer().nbytes))\n result.write(settingsChunk.getbuffer())\n result.write(keysChunk.getbuffer())\n\n print(result.getbuffer().nbytes)\n s = result.getvalue().hex()\n arr = (a+b for a,b in zip(s[::2], s[1::2]))\n outStr = \"\"\n for b in arr:\n outStr += \"0x\" + b + \", \"\n print(\"{%s}\" % (outStr))\n\n return result.getvalue(), dfdOnto\n", "repo_name": "scivi-tools/scivi.web", "sub_path": "server/eon.py", "file_name": "eon.py", "file_ext": "py", "file_size_in_byte": 9017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "97", "api": [{"api_name": "enum.IntEnum", "line_number": 12, "usage_type": "name"}, {"api_name": "onto.onto.Onto", "line_number": 23, "usage_type": "name"}, {"api_name": "onto.onto", "line_number": 24, "usage_type": "name"}, {"api_name": "onto.onto.Node", "line_number": 26, "usage_type": "name"}, {"api_name": "onto.onto.Onto", "line_number": 33, "usage_type": "name"}, {"api_name": "onto.onto.first", "line_number": 35, "usage_type": "call"}, {"api_name": "onto.onto.first", "line_number": 37, "usage_type": "call"}, {"api_name": "onto.onto.first", "line_number": 39, "usage_type": "call"}, {"api_name": "onto.onto.Node", "line_number": 42, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 85, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 87, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 89, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 91, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 93, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 95, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 97, "usage_type": "call"}, {"api_name": "onto.onto.Onto", "line_number": 106, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 107, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 108, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 109, "usage_type": "call"}, {"api_name": "onto.onto.first", "line_number": 126, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 147, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 182, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "698302420", "text": "import os\nimport subprocess\nimport datetime\nclass avlRun:\n\t\"\"\" This function takes in a terminal command which would open avl (ie. \"avl3.35\") and a list\n\t\tof strings containting avl commands to be executed in sequence (ie. 
[\"load plan.avl\", ....]).\n\t\treturns whether or not the commands were executed successfully \n\n\t\t--------------------------------------------------------------------------------------------\n\t\tINPUTS\n\t\t\t- avl_opn_cmd: \tstring that would open avl from terminal\n\t\t\t- cmd_list:\t\tlist of avl commands in string form\n\n\t\t--------------------------------------------------------------------------------------------\n\t\tOUTPUTS\n\t\t\t- avl_out:\t\tsubprocess object indicating whether avl ran successfully\n\n\t\"\"\"\n\tdef __init__(self,avl_opn_cmd,avl_file,mass_file):\n\t\t\"\"\" This is the initialization function for the avlRun class\n\n\t\t\t----------------------------------------------------------------------------------------\n\t\t\tINPUTS\n\t\t\t\t- avl_file: file path to avl file\n\t\t\t\t- mass_file: file path to mass file for avl\n\t\t\"\"\"\n\n\t\t# default command start\n\t\tself.avl_opn_cmd = avl_opn_cmd\n\n\t\tself.cmd_path = ['load ' + avl_file,\n\t\t\t\t\t\t 'mass ' + mass_file,\n\t\t\t\t\t 'mset 0']\n\n\t\tself.tmp_dir = \"tmp_avl\"\t\t\t \n\t\tos.makedirs(self.tmp_dir, exist_ok = True)\n # ---------------------------------------------------------------------------------------------\n # SETTING FLIGHT CONSTRAINTS\n # ---------------------------------------------------------------------------------------------\n\tdef set_flight_constraint(self,fregime,var,value):\n\t\t\"\"\" This function sets a constraint \n\n\t\t\t----------------------------------------------------------------------------------------\n\t\t\tINPUTS\n\t\t\t\t- fregime:\t\tflight regime ('C1' or 'C2')\n\t\t\t\t- var:\t\t\tvariable ('v', 'CL', etc.)\n\t\t\t\t- value:\t\tvalue to be set\n\t\t\"\"\"\n\n\t\tnew_cmd = ['oper','C1',var,str(value),'\\n','\\n']\n\t\t\n\t\t[self.cmd_path.append(j) for j in new_cmd]\n\n\tdef set_var_constraint(self,var,cnstr,value):\n\t\t\"\"\" This function sets a variable constraint\n\n\t\t\t----------------------------------------------------------------------------------------\n\t\t\tINPUTS\n\t\t\t\t- var:\t\t\tvariable to be set (ie. D1)\n\t\t\t\t- cnstr:\t\tconstaint variable (ie. 
PM)\n\t\t\t\t- value:\t\tvalue to set cnstr to (ie PM = 0 through D1)\n\t\t\"\"\"\n\n\n\t\tnew_cmd = ['oper',var,cnstr,str(value),'\\n']\n\n\t\t[self.cmd_path.append(j) for j in new_cmd]\n\n # ---------------------------------------------------------------------------------------------\n # RUNNING AVL PROCESSES\n # ---------------------------------------------------------------------------------------------\n\tdef get_flow_analysis(self,output):\n\t\t\"\"\" This function returns the flow analysis computed using avl\n\n\t\t\t---------------------------------------------------------------------------------------\n\t\t\tINPUTS\n\t\t\t\t- output:\t\tOPER output (typically FT, ST, etc.)\n\n\t\t\t---------------------------------------------------------------------------------------\n\t\t\tOUTPUTS\n\t\t\t\t- fname:\t\tfile save name\n\t\t\t\t- proc_out:\t\tsubprocess out\n\t\t\"\"\"\n\t\tfname = self.tmp_dir + \"/avlFA_\"+output +\"_\" +datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n\n\t\tnew_cmd = ['oper','x',output,fname,'\\n','q']\n\n\t\tcmd_tmp = self.cmd_path.copy()\n\n\t\t[cmd_tmp.append(j) for j in new_cmd]\n\n\t\tcmd_bytes = \"\\n\".join(cmd_tmp)\n\n\t\twith open(self.tmp_dir + '/stdout.txt','wb') as outfile:\n\t\t\tproc_out = subprocess.run(self.avl_opn_cmd, input=cmd_bytes.encode(), stdout = outfile)\n\t\t\n\t\treturn((fname,proc_out))\n\n\tdef get_eig_analysis(self,output):\n\t\t\"\"\" This function returns the eigen analysis using avl\n\n\t\t\t---------------------------------------------------------------------------------------\n\t\t\tINPUTS\n\t\t\t\t- output\n\t\t\"\"\"\n\t\tfname = self.tmp_dir +\"/avlEA_\"+output +\"_\" +datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n\n\t\tnew_cmd = ['oper','x','\\n','mode','0','n',output,fname,'\\n','q']\n\n\t\tcmd_tmp = self.cmd_path.copy()\n\n\t\t[cmd_tmp.append(j) for j in new_cmd]\n\n\t\tcmd_bytes = \"\\n\".join(cmd_tmp)\n\n\t\twith open(self.tmp_dir + '/stdout.txt','wb') as outfile:\n\t\t\tproc_out = subprocess.run(self.avl_opn_cmd, input=cmd_bytes.encode(), stdout = outfile)\n\t\t\n\t\treturn((fname,proc_out))\n\n\tdef custom_cmd(self,cmd_list):\n\t\t\"\"\" This function runs the custom command sequence given in cmd_list\n\n\t\t\t--------------------------------------------------------------------------------------\n\t\t\tINPUTS\n\t\t\t\t- cmd_list: list of string\n\t\t\"\"\"\n\t\tcmd_tmp = self.cmd_path.copy()\n\n\t\t[cmd_tmp.append(cmd)for cmd in cmd_list]\n\n\t\tif cmd_list[-1] == \"q\":\n\t\t\tcmd_bytes = \"\\n\".join(cmd_tmp)\n\t\telse:\n\t\t\tcmd_tmp.append(\"q\")\n\t\t\tcmd_bytes = \"\\n\".join(cmd_tmp)\n\n\t\twith open(self.tmp_dir + '/stdout.txt','wb') as outfile:\n\t\t\treturn(subprocess.run(self.avl_opn_cmd, input=cmd_bytes.encode(), stdout =outfile))\n\n", "repo_name": "coursekevin/avlpy", "sub_path": "avlpy/avlRun.py", "file_name": "avlRun.py", "file_ext": "py", "file_size_in_byte": 4718, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.makedirs", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "attribute"}, {"api_name": "subprocess.run", 
"line_number": 117, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "23802217415", "text": "# Views profiles\n\n# Django\nfrom django.http import Http404\n\n# Django rest framework\nfrom rest_framework import status\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n# Documentation\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\n\n# Models\nfrom ..models import SupplierProfile\n\n# Permissions\nfrom rest_framework.permissions import AllowAny\n\n# Serializers\nfrom ..serializers import SupplierProfileSerializer, DocumentationSupplierProfileSerializer\n\n\nclass SupplierProfileView(APIView):\n \"\"\"View for request the profile of a supplier.\"\"\"\n\n permission_classes = [AllowAny]\n\n @swagger_auto_schema( operation_id = \"Retrieve Supplier Profile\", tags = [\"Profiles\"],\n responses = { 200: DocumentationSupplierProfileSerializer, 404: openapi.Response(\"Not Found\")}, security = []\n )\n def get(self, request, accountname, format = None):\n \"\"\"Return the profile of a supplier with the accountname given by param.\\n\n The Authorization access token is not required if the requesting user is a visitor (Buyer, Anonymous, etc).\n If the requester user want to edit the profile because it have access, the Authorization header must\n be included for validating credentials and returning the editable attribute as true if its the case.\n \"\"\"\n\n try:\n supplier = self.get_object(accountname)\n profile_supplier = {\n 'supplier': supplier\n }\n profile_serializer = SupplierProfileSerializer(\n profile_supplier,\n context = {'request': request}\n )\n\n data = profile_serializer.data\n data_status = status.HTTP_200_OK\n except Http404:\n data = {\"detail\": \"Supplier not found with the accountname provided\"}\n data_status = status.HTTP_404_NOT_FOUND\n\n return Response(data, status = data_status)\n\n def get_object(self, accountname):\n return get_object_or_404(\n SupplierProfile,\n company__accountname = accountname\n )", "repo_name": "JPGarzonE/business_network_core_API", "sub_path": "suppliers/views/profiles.py", "file_name": "profiles.py", "file_ext": "py", "file_size_in_byte": 2174, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 29, "usage_type": "name"}, {"api_name": "serializers.SupplierProfileSerializer", "line_number": 46, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 52, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 52, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 55, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 55, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 57, "usage_type": "call"}, {"api_name": "drf_yasg.utils.swagger_auto_schema", "line_number": 31, "usage_type": "call"}, {"api_name": "serializers.DocumentationSupplierProfileSerializer", "line_number": 32, "usage_type": "name"}, {"api_name": "drf_yasg.openapi.Response", "line_number": 32, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", 
"line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.generics.get_object_or_404", "line_number": 60, "usage_type": "call"}, {"api_name": "models.SupplierProfile", "line_number": 61, "usage_type": "argument"}]} +{"seq_id": "11967976727", "text": "\"\"\"Functions for importing and analyzing traffic traces\"\"\"\n\nimport collections\nimport math\nimport time\nimport types\n\nimport dateutil\n\nfrom icarus.tools import TruncatedZipfDist\n\nimport numpy as np\n\nfrom scipy.stats import chisquare\n\n\n__all__ = [\n \"frequencies\",\n \"one_timers\",\n \"trace_stats\",\n \"zipf_fit\",\n \"parse_url_list\",\n \"parse_wikibench\",\n \"parse_squid\",\n \"parse_youtube_umass\",\n \"parse_common_log_format\",\n]\n\n\ndef frequencies(data):\n \"\"\"Extract frequencies from traces. Returns array of sorted frequencies\n\n Parameters\n ----------\n data : array-like\n An array of generic data (i.e. URLs of web pages)\n\n Returns\n -------\n frequencies : array of int\n The frequencies of the data sorted in descending order\n\n Notes\n -----\n This function does not return the mapping between data elements and their\n frequencies, it only returns frequencies.\n This function can be used to get frequencies to pass to the *zipf_fit*\n function given a set of data, e.g. content request traces.\n \"\"\"\n return np.asarray(sorted(collections.Counter(data).values(), reverse=True))\n\n\ndef one_timers(data):\n \"\"\"Return fraction of contents requested only once (i.e., one-timers)\n\n Parameters\n ----------\n data : array-like\n An array of generic data (i.e. URLs of web pages)\n\n Returns\n -------\n one_timers : float\n Fraction of content objects requested only once.\n \"\"\"\n n_items = 0\n n_onetimers = 0\n counter = collections.Counter(data)\n for i in counter.itervalues():\n n_items += 1\n if i == 1:\n n_onetimers += 1\n return n_onetimers / n_items\n\n\ndef trace_stats(data):\n \"\"\"Print full stats of a trace\n\n Parameters\n ----------\n data : array-like\n An array of generic data (i.e. URLs of web pages)\n\n Return\n ------\n stats : dict\n Metrics of the trace\n \"\"\"\n if isinstance(data, types.GeneratorType):\n data = collections.deque(data)\n freqs = frequencies(data)\n alpha, p = zipf_fit(freqs)\n n_reqs = len(data)\n n_contents = len(freqs)\n n_onetimers = len(freqs[freqs == 1])\n return dict(\n n_contents=n_contents,\n n_reqs=n_reqs,\n n_onetimers=n_onetimers,\n alpha=alpha,\n p=p,\n onetimers_contents_ratio=n_onetimers / n_contents,\n onetimers_reqs_ratio=n_onetimers / n_reqs,\n mean_reqs_per_content=n_reqs / n_contents,\n )\n\n\ndef zipf_fit(obs_freqs, need_sorting=False):\n \"\"\"Returns the value of the Zipf's distribution alpha parameter that best\n fits the data provided and the p-value of the fit test.\n\n Parameters\n ----------\n obs_freqs : array\n The array of observed frequencies sorted in descending order\n need_sorting : bool, optional\n If True, indicates that obs_freqs is not sorted and this function will\n sort it. If False, assume that the array is already sorted\n\n Returns\n -------\n alpha : float\n The alpha parameter of the best Zipf fit\n p : float\n The p-value of the test\n\n Notes\n -----\n This function uses the method described in\n http://stats.stackexchange.com/questions/6780/how-to-calculate-zipfs-law-coefficient-from-a-set-of-top-frequencies\n \"\"\"\n try:\n from scipy.optimize import minimize_scalar\n except ImportError:\n raise ImportError(\n \"Cannot import scipy.optimize minimize_scalar. 
\"\n \"You either don't have scipy install or you have a \"\n \"version too old (required 0.12 onwards)\"\n )\n obs_freqs = np.asarray(obs_freqs)\n if need_sorting:\n # Sort in descending order\n obs_freqs = -np.sort(-obs_freqs)\n n = len(obs_freqs)\n\n def log_likelihood(alpha):\n return np.sum(\n obs_freqs\n * (\n alpha * np.log(np.arange(1.0, n + 1))\n + math.log(sum(1.0 / np.arange(1.0, n + 1) ** alpha))\n )\n )\n\n # Find optimal alpha\n alpha = minimize_scalar(log_likelihood)[\"x\"]\n # Calculate goodness of fit\n if alpha <= 0:\n # Silently report a zero probability of a fit\n return alpha, 0\n exp_freqs = np.sum(obs_freqs) * TruncatedZipfDist(alpha, n).pdf\n p = chisquare(obs_freqs, exp_freqs)[1]\n return alpha, p\n\n\ndef parse_url_list(path):\n \"\"\"Parse traces from a text file where each line contains a URL requested\n without timestamp or counters\n\n Parameters\n ----------\n path : str\n The path to the trace file to parse\n\n Returns\n -------\n trace : iterator of strings\n An iterator whereby each element is dictionary expressing all\n attributes of an entry of the trace\n \"\"\"\n with open(path) as f:\n yield from f\n return\n\n\ndef parse_wikibench(path):\n \"\"\"Parses traces from the Wikibench dataset\n\n Parameters\n ----------\n path : str\n The path to the trace file to parse\n\n Returns\n -------\n trace : iterator of dicts\n An iterator whereby each element is dictionary expressing all\n attributes of an entry of the trace\n \"\"\"\n with open(path) as f:\n for line in f:\n entry = line.split(\" \")\n yield dict(counter=int(entry[0]), timestamp=entry[1], url=entry[2])\n return\n\n\ndef parse_squid(path):\n \"\"\"Parses traces from a Squid log file.\n Parse a Squid log file.\n\n Squid is an HTTP reverse proxy. Its logs contains traces of all HTTP\n requests served and can be used for trace-driven simulations based on\n realistic HTTP workloads.\n Traces from the IRCache dataset are in this format.\n\n Parameters\n ----------\n path : str\n The path to the trace file to parse\n\n Returns\n -------\n trace : iterator of dicts\n An iterator whereby each element is dictionary expressing all\n attributes of an entry of the trace\n\n Notes\n -----\n Documentation describing the Squid log format is available here:\n http://wiki.squid-cache.org/Features/LogFormat\n \"\"\"\n with open(path) as f:\n for line in f:\n entry = line.split(\" \")\n timestamp = entry[0]\n duration = int(entry[1])\n client_addr = entry[2]\n log_tag, http_code = entry[3].split(\"/\")\n http_code = int(http_code)\n bytes_len = int(entry[4])\n req_method = entry[5]\n url = entry[6]\n client_ident = entry[7] if entry[7] != \"-\" else None\n hierarchy_data, hostname = entry[8].split(\"/\")\n content_type = entry[9] if entry[9] != \"-\" else None\n yield dict(\n time=timestamp,\n duration=duration,\n client_addr=client_addr,\n log_tag=log_tag,\n http_code=http_code,\n bytes_len=bytes_len,\n req_method=req_method,\n url=url,\n client_ident=client_ident,\n hierarchy_data=hierarchy_data,\n hostname=hostname,\n content_type=content_type,\n )\n return\n\n\ndef parse_youtube_umass(path):\n \"\"\"Parse YouTube collected at UMass campus network [1]_.\n\n These data were collected at UMass campus network over a a measurement\n period between June 2007 and March 2008.\n\n This function parses the request traces, named youtube.parsed.X.Y.dat.\n Each entry of the trace provides the following information elements:\n * Timestamp\n * YouTube server IP (anonymized)\n * Client IP (anonymized)\n * Request\n * Video 
ID\n * Content server IP\n\n Traces are available at http://traces.cs.umass.edu/index.php/Network/Network\n\n Parameters\n ----------\n path : str\n The path to the trace file to parse\n\n Returns\n -------\n trace : iterator of dicts\n An iterator whereby each element is dictionary expressing all\n attributes of an entry of the trace\n\n References\n ----------\n ..[1] Michael Zink, Kyoungwon Suh, Yu Gu and Jim Kurose,\n Watch Global Cache Local: YouTube Network Traces at a Campus Network -\n Measurements and Implications, in Proc. of IEEE MMCN'08\n \"\"\"\n with open(path) as f:\n for line in f:\n entry = line.split(\" \")\n timestamp = entry[0]\n youtube_server_addr = int(entry[1])\n client_addr = entry[2]\n request = entry[3]\n video_id = entry[4]\n content_server_addr = entry[5]\n yield dict(\n time=timestamp,\n youtube_server_addr=youtube_server_addr,\n client_addr=client_addr,\n request=request,\n video_id=video_id,\n content_server_addr=content_server_addr,\n )\n return\n\n\ndef parse_common_log_format(path):\n \"\"\"Parse files saved in the Common Log Format (CLF)\n\n Parameters\n ----------\n path : str\n The path to the Common Log Format file to parse\n\n Returns\n -------\n events : iterator\n iterator over the events parsed from the file\n\n Notes\n -----\n Common Log Format specifications:\n http://www.w3.org/Daemon/User/Config/Logging.html#common-logfile-format\n\n \"\"\"\n with open(path) as f:\n for line in f:\n entry = line.split(\" \")\n client_addr = entry[0]\n user_ident = entry[1]\n auth_user = entry[2]\n date = entry[3][1:-1]\n request = entry[4]\n status = int(entry[5])\n n_bytes = int(entry[6])\n # Convert timestamp into float\n t = time.mktime(\n dateutil.parser.parse(date.replace(\":\", \" \", 0)).timetuple()\n )\n event = dict(\n client_addr=client_addr,\n user_ident=user_ident,\n auth_user=auth_user,\n request=request,\n status=status,\n bytes=n_bytes,\n )\n yield t, event\n return\n", "repo_name": "icarus-sim/icarus", "sub_path": "icarus/tools/traces.py", "file_name": "traces.py", "file_ext": "py", "file_size_in_byte": 10037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 62, "dataset": "github-code", "pt": "97", "api": [{"api_name": "numpy.asarray", "line_number": 50, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 50, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 68, "usage_type": "call"}, {"api_name": "types.GeneratorType", "line_number": 89, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 150, "usage_type": "call"}, {"api_name": "math.log", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 151, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize_scalar", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 161, "usage_type": "call"}, {"api_name": "icarus.tools.TruncatedZipfDist", "line_number": 161, "usage_type": "call"}, {"api_name": "scipy.stats.chisquare", "line_number": 162, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 347, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 348, 
"usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 348, "usage_type": "attribute"}]} +{"seq_id": "42148165371", "text": "import os\nimport requests\nimport tarfile\n\n# URL of the dataset\ndataset_url = \"http://image.ntua.gr/iva/datasets/flickr_logos/flickr_logos_27_dataset.tar.gz\"\n\n# Path to save the downloaded file\ncurrent_folder = os.path.abspath(os.path.dirname(__file__))\nsave_path = os.path.join(current_folder, \"flickr_logos_27_dataset.tar.gz\")\n\n# Function to download the dataset\ndef download_dataset(url, save_path):\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(save_path, \"wb\") as file:\n for chunk in response.iter_content(chunk_size=1024):\n file.write(chunk)\n print(f\"Dataset downloaded successfully: {save_path}\")\n else:\n print(\"Failed to download the dataset. Please check the URL.\")\n\n# Function to extract the dataset\ndef extract_dataset(path, extract_to=current_folder):\n with tarfile.open(path, \"r:gz\") as tar:\n tar.extractall(path=extract_to)\n print(f\"Dataset extracted successfully: {path}\")\n \n # Get names of all items in the tar archive\n members = tar.getnames()\n \n # Check each item if it's a tar file\n for member in members:\n if member.endswith(('.tar.gz', '.tgz')):\n nested_tar_path = os.path.join(extract_to, member)\n extract_dataset(nested_tar_path, os.path.dirname(nested_tar_path))\n os.remove(nested_tar_path) # remove the nested tar file after extraction\n\n# Download the dataset\ndownload_dataset(dataset_url, save_path)\n\n# Extract the dataset\nextract_dataset(save_path)\n", "repo_name": "dmaldofcb/jabronis", "sub_path": "Scan2Invest/PredictionModel/download_dataset.py", "file_name": "download_dataset.py", "file_ext": "py", "file_size_in_byte": 1588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "10059230689", "text": "from pathlib import Path\nfrom enum import Enum, auto\nfrom datetime import datetime, timedelta\nimport re\nimport json\nimport subprocess\nimport yaml\nimport logging\nimport warnings\n\nfrom tqdm import TqdmExperimentalWarning\nfrom tqdm.rich import tqdm_rich\n\nfrom ffmpeg.chapters import Chapter\nfrom ffmpeg.encoders import VideoEncoders\nfrom ffmpeg.file_extensions import VideoExtensions\n\nwarnings.filterwarnings('ignore', category=TqdmExperimentalWarning)\n\nlogger = logging.getLogger(__name__)\n\ndef get_timedelta(time_str: str, str_format='%H:%M:%S.%f'):\n t = datetime.strptime(time_str, str_format)\n return timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)\n\n\nclass FFmpeg:\n DEFAULT_TGT_EXT 
= VideoExtensions.MP4\n DEFAULT_TGT_CODEC = VideoEncoders.LIBX265\n\n CONVERT_SUFFIX = '-converted'\n CHAPTERS_SUFFIX = '-chapters'\n \n REGEX_NUMBER_GROUP = '([+-]?[0-9]*[.]?[0-9]*)'\n REGEX_TIME_GROUP = '([0-9]{2}:[0-9]{2}:[0-9]{2}[.[0-9]*]?)'\n REGEX_DURATION = re.compile(\n r'Duration:[ ]*' + REGEX_TIME_GROUP\n )\n REGEX_BITRATE = re.compile(\n r'bitrate=[ ]*(?P[+-]?[0-9]*[.]?[0-9]*)[ ]*(?P.*\\/s)'\n )\n # e.g. out_time=00:00:05.066667\n REGEX_OUT_TIME = re.compile(\n r'out_time=[ ]*' + REGEX_TIME_GROUP\n )\n REGEX_SPEED = re.compile(\n r'speed=[ ]*' + REGEX_NUMBER_GROUP + 'x'\n )\n REGEX_FRAME = re.compile(\n r'frame=[ ]*' + REGEX_NUMBER_GROUP\n )\n\n \n FFMETADATA_HEADER = ';FFMETADATA1'\n CHAPTER_TS_FORMAT = '%H:%M:%S'\n\n class ProgressMetric(Enum):\n FRAMES = auto()\n TIME = auto()\n\n @classmethod\n def add_chapters_from_yaml(cls, yaml_path: Path | str, overwrite: bool = False, inplace: bool = False):\n if isinstance(yaml_path, str):\n yaml_path = Path(yaml_path)\n with yaml_path.open('r') as f:\n data = yaml.safe_load(f)\n for video, chapter_data in data.items():\n ts_map = dict()\n for k in chapter_data:\n if isinstance(k, str):\n ts_map[k] = get_timedelta(k, str_format=cls.CHAPTER_TS_FORMAT)\n for ts_str, ts_timedelta in ts_map.items():\n chapter_data[ts_timedelta] = chapter_data.pop(ts_str)\n\n input_path = yaml_path.with_name(video)\n if input_path.exists():\n ff = FFmpeg(input_path=input_path, chapter_data=chapter_data)\n ff.write_chapters(overwrite=overwrite, inplace=inplace)\n else:\n logger.warning(f'skipping chapter information for file: {input_path}. File does not exist.')\n\n @classmethod\n def convert_set(\n cls, \n input_paths: set[Path], \n output_dir_path: Path = None, \n extension: VideoExtensions = None,\n overwrite: bool = None,\n vcodec: VideoEncoders = None,\n crf: int = None,\n fps: int = None,\n width: int = None,\n height: int = None,\n progress_metric: ProgressMetric = None\n ):\n for input_path in input_paths:\n ff = FFmpeg(input_path=input_path)\n\n if extension is None:\n extension = input_path.suffix\n\n if output_dir_path is not None:\n if isinstance(output_dir_path, str):\n output_dir_path = Path(output_dir_path)\n if output_dir_path.resolve() == input_path.parent.resolve():\n if extension == input_path.suffix:\n output_path = None # use default file name suffix\n else:\n output_path = input_path.with_name(f'{input_path.stem}.{extension}')\n else:\n output_path = output_dir_path.joinpath(f'{input_path.stem}.{extension}')\n else:\n if extension == input_path.suffix:\n output_path = None # use default file name suffix\n else:\n output_path = input_path.with_name(f'{input_path.stem}.{extension}')\n\n convert_kwargs = dict() # passing only what we have allows us to use defaults at the convert func\n if overwrite is not None:\n convert_kwargs['overwrite'] = overwrite\n if vcodec is not None:\n convert_kwargs['vcodec'] = vcodec\n if crf is not None:\n convert_kwargs['crf'] = crf\n if fps is not None:\n convert_kwargs['fps'] = fps\n if width is not None:\n convert_kwargs['width'] = width\n if height is not None:\n convert_kwargs['height'] = height\n if progress_metric is not None:\n convert_kwargs['progress_metric'] = progress_metric\n \n ff.convert(output_path=output_path, **convert_kwargs)\n\n @classmethod\n def find_uncoverted(cls, videos_dir_path: Path | str, convert_suffix: str = CONVERT_SUFFIX) -> set[Path]:\n all_vids = cls.find_videos(videos_dir_path=videos_dir_path) # first get everything\n converted_vids = 
cls.find_videos(videos_dir_path=videos_dir_path, stem_filter_str=f'*{convert_suffix}') # find the ones that are \"converted\"\n all_original_vids = all_vids.difference(converted_vids) # only consider original/non-converted videos\n\n originals_converted = set()\n for cv in converted_vids: # loop over the converted vids to remove the originals from the set\n for ov in all_original_vids:\n if cv.stem.startswith(ov.stem):\n originals_converted.add(ov)\n break\n\n return all_original_vids.difference(originals_converted) # return the set of originals not previously converted\n \n @classmethod\n def convert_all(\n cls, \n videos_dir_path: Path, \n output_dir_path: Path = None, \n extension: VideoExtensions = None,\n overwrite: bool = None,\n vcodec: VideoEncoders = None,\n crf: int = None,\n fps: int = None,\n width: int = None,\n height: int = None,\n progress_metric: ProgressMetric = None\n ):\n target_files = cls.find_uncoverted(videos_dir_path=videos_dir_path)\n\n convert_kwargs = dict() # passing only what we have allows us to use defaults at the convert func\n if output_dir_path is not None:\n convert_kwargs['output_dir_path'] = output_dir_path\n if overwrite is not None:\n convert_kwargs['overwrite'] = overwrite\n if vcodec is not None:\n convert_kwargs['vcodec'] = vcodec\n if crf is not None:\n convert_kwargs['crf'] = crf\n if fps is not None:\n convert_kwargs['fps'] = fps\n if width is not None:\n convert_kwargs['width'] = width\n if height is not None:\n convert_kwargs['height'] = height\n if progress_metric is not None:\n convert_kwargs['progress_metric'] = progress_metric\n cls.convert_set(input_paths=target_files, **convert_kwargs)\n\n @classmethod\n def find_videos(cls, videos_dir_path: Path | str, extensions: list[VideoExtensions] = VideoExtensions, stem_filter_str: str = '*') -> set[Path]:\n if isinstance(videos_dir_path, str):\n videos_dir_path = Path(videos_dir_path)\n paths = set()\n for ext in extensions:\n for f in videos_dir_path.glob(f'{stem_filter_str}.{ext}'):\n paths.add(f)\n return paths\n\n @classmethod\n def prep_cmd(cls, cmd: str, *args, suffix: str = None, **kwargs):\n if len(args) > 0:\n cmd += ' '\n cmd += ' '.join(args)\n if len(kwargs) > 0:\n for k, v in kwargs.items():\n if v is None:\n cmd += f' -{k}'\n else:\n cmd += f' -{k} {v}'\n if suffix is not None:\n cmd += f' {suffix}'\n return cmd\n \n @classmethod\n def get_base_ffmpeg_cmd(cls, overwrite: bool = False, input_path: Path | str = None):\n cmd = 'ffmpeg'\n kwargs = {\n 'progress': '-',\n 'nostats': None\n }\n if overwrite:\n kwargs['y'] = None\n if isinstance(input_path, str):\n input_path = Path(input_path)\n if input_path is not None:\n kwargs['i'] = f'\"{input_path.as_posix()}\"'\n return cls.prep_cmd(cmd, **kwargs)\n \n @classmethod\n def get_proc(cls, cmd: str):\n logger.debug(f'calling subprocess: {cmd}')\n return subprocess.Popen(\n args=cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=False,\n )\n \n @classmethod\n def probe(cls, input_path: Path | str, *extra_args, **extra_kwargs):\n if isinstance(input_path, str):\n input_path = Path(input_path)\n cmd = 'ffprobe'\n args = [\n '-show_format',\n '-show_streams'\n ]\n kwargs = {\n 'of': 'json'\n }\n suffix = f'\"{input_path.as_posix()}\"'\n for a in extra_args:\n if a not in args:\n args.append(a)\n kwargs.update(extra_kwargs)\n\n cmd = cls.prep_cmd(cmd, *args, suffix=suffix, **kwargs)\n\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = 
p.communicate()\n        if p.returncode != 0:\n            raise Exception(f'ffprobe Error: stdout: {out}. stderr: {err}.')\n        return json.loads(out.decode('utf-8'))\n    \n\n    def __init__(self, input_path: Path, chapter_data: dict[timedelta, str] = {}):\n        self.chapters: dict[timedelta, Chapter] = dict()\n        self.__info: dict = None\n        self.input_path = input_path\n\n        if len(chapter_data) > 0:\n            self.add_chapters(chapter_data=chapter_data)\n    \n    @property\n    def info(self):\n        if self.__info is None:\n            self.__refresh_info()\n        return self.__info\n    \n    def __refresh_info(self):\n        self.__info = self.probe(input_path=self.input_path)\n    \n    @property\n    def video_info(self):\n        return next(s for s in self.info['streams'] if s['codec_type'] == 'video')\n\n    @property\n    def total_duration(self):\n        return timedelta(seconds=float(self.video_info['duration']))\n    \n    @property\n    def total_frames(self):\n        return int(self.video_info['nb_frames'])\n    \n    @property\n    def metadata_file(self) -> Path:\n        if self.input_path is not None:\n            return self.input_path.parent.joinpath(f'{self.input_path.stem}.ini')\n    \n    def show_progress(self, proc: subprocess.Popen, progress_metric: ProgressMetric = None, progress_kwargs:dict = {}):\n        if progress_metric is None:\n            progress_metric = self.ProgressMetric.FRAMES\n        if progress_metric is self.ProgressMetric.FRAMES:\n            progress_kwargs['total'] = self.total_frames\n            progress_kwargs['unit'] = ' frames'\n        elif progress_metric is self.ProgressMetric.TIME:\n            progress_kwargs['total'] = self.total_duration.total_seconds()\n            progress_kwargs['unit'] = ' s'\n        \n        desc = progress_kwargs.pop('desc', self.input_path.stem)\n        dynamic_ncols = progress_kwargs.pop('dynamic_ncols', True)\n\n        progress = tqdm_rich(desc=desc, dynamic_ncols=dynamic_ncols, **progress_kwargs)\n\n        while True:\n            if proc.stdout is None:\n                continue\n            proc_stdout_line = proc.stdout.readline().decode('utf-8', errors='replace').strip()\n            if progress_metric is self.ProgressMetric.FRAMES:\n                frame_data = self.REGEX_FRAME.search(proc_stdout_line)\n                if frame_data is not None:\n                    frame = int(frame_data.group(1))\n                    progress.update(frame - progress.n)\n                    continue\n            elif progress_metric is self.ProgressMetric.TIME:\n                out_time_data = self.REGEX_OUT_TIME.search(proc_stdout_line)\n                if out_time_data is not None:\n                    out_time = get_timedelta(out_time_data.group(1))\n                    progress.update(out_time.total_seconds() - progress.n)\n                    continue\n            else:\n                progress.update()\n            if proc_stdout_line == \"\" and proc.poll() is not None:\n                break\n\n    def convert(\n        self, \n        output_path: Path = None, \n        overwrite: bool = False, \n        vcodec: VideoEncoders | str = VideoEncoders.LIBX265, \n        crf: int = 35,\n        fps: int = None,\n        width: int = None, \n        height: int=None,\n        progress_metric: ProgressMetric = None\n    ):\n        cmd = self.get_base_ffmpeg_cmd(overwrite=overwrite, input_path=self.input_path)\n\n        if output_path is None:\n            output_path = self.input_path.with_stem(f'{self.input_path.stem}{self.CONVERT_SUFFIX}')\n        suffix = f'\"{output_path.as_posix()}\"'\n\n        kwargs = dict()\n        \n        if vcodec is not None:\n            if not isinstance(vcodec, str):\n                vcodec = vcodec.value\n            kwargs['c'] = vcodec\n\n        if crf is not None:\n            kwargs['crf'] = crf\n\n        filters = []\n\n        if fps is not None:\n            filters.append(f'fps={fps}')\n\n        if (width is not None) or (height is not None):\n            if width is None:\n                width = -1\n            if height is None:\n                height = -1\n            filters.append(f'scale={width}:{height}')\n\n        if len(filters) > 0:\n            fltr = ', '.join(filters)\n            kwargs['vf'] = f'\"{fltr}\"'\n\n        cmd = self.prep_cmd(cmd, suffix=suffix, **kwargs)\n        proc = self.get_proc(cmd=cmd)\n        self.show_progress(proc=proc, progress_metric=progress_metric)\n\n    def add_chapter(self, chapter: Chapter):\n        key = chapter.start\n        self.chapters[key] = chapter\n        return key\n\n    def add_chapters(self, chapter_data: dict[timedelta, str]):\n        chapter = None\n        for chapter_ts, chapter_title in dict(sorted(chapter_data.items())).items(): # iterate over the chapter items from first to last ts\n            if isinstance(chapter_ts, str):\n                chapter_ts = get_timedelta(time_str=chapter_ts)\n            if isinstance(chapter, Chapter):\n                chapter.end = chapter_ts # add the previous chapter end ts\n                self.add_chapter(chapter=chapter)\n            chapter = Chapter(start=chapter_ts, title=chapter_title)\n        if chapter is not None:\n            chapter.end = self.total_duration # very last chapter can be added with end_ts as the total duration\n            self.add_chapter(chapter=chapter)\n\n    def sort_chapters(self):\n        self.chapters = dict(sorted(self.chapters.items()))\n\n    def create_metadata_ini(self):\n        self.sort_chapters()\n        with self.metadata_file.open('w') as f:\n            f.write(f'{self.FFMETADATA_HEADER}\\n')\n            for c in self.chapters.values():\n                if isinstance(c, Chapter):\n                    c.write_ini_section(f=f)\n    \n    def write_chapters(\n        self, \n        output_path: Path = None,\n        overwrite: bool = False,\n        inplace: bool = False,\n        progress_metric: ProgressMetric = None\n    ): \n        self.create_metadata_ini()\n\n        if output_path is None:\n            if inplace is True:\n                overwrite = True\n                output_path = self.input_path.with_stem(f'{self.input_path.stem}.tmp')\n            else:\n                output_path = self.input_path.with_stem(f'{self.input_path.stem}{self.CHAPTERS_SUFFIX}')\n        \n        cmd = self.get_base_ffmpeg_cmd(overwrite=overwrite, input_path=self.input_path)\n\n        kwargs = dict()\n        kwargs['i'] = f'\"{self.metadata_file}\"'\n        kwargs['map_chapters'] = 1\n        kwargs['codec'] = 'copy'\n\n        suffix = f'\"{output_path.as_posix()}\"'\n\n        cmd = self.prep_cmd(cmd, suffix=suffix, **kwargs)\n        proc = self.get_proc(cmd=cmd)\n        self.show_progress(proc=proc, progress_metric=progress_metric)\n\n        if inplace:\n            output_path.replace(self.input_path)\n    \n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.DEBUG)", "repo_name": "pbouill/ffmpeg", "sub_path": "ffmpeg/ffmpeg.py", "file_name": "ffmpeg.py", "file_ext": "py", "file_size_in_byte": 16324, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "warnings.filterwarnings", "line_number": 18, "usage_type": "call"}, {"api_name": "tqdm.TqdmExperimentalWarning", "line_number": 18, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "ffmpeg.file_extensions.VideoExtensions.MP4", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ffmpeg.file_extensions.VideoExtensions", "line_number": 28, "usage_type": "name"}, {"api_name": "ffmpeg.encoders.VideoEncoders.LIBX265", "line_number": 29, "usage_type": "attribute"}, {"api_name": "ffmpeg.encoders.VideoEncoders", "line_number": 29, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 46, "usage_type": "call"}, {"api_name": 
"re.compile", "line_number": 49, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 57, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 58, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 59, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 62, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 64, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 66, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 85, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 86, "usage_type": "name"}, {"api_name": "ffmpeg.file_extensions.VideoExtensions", "line_number": 87, "usage_type": "name"}, {"api_name": "ffmpeg.encoders.VideoEncoders", "line_number": 89, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 104, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 137, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 154, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 155, "usage_type": "name"}, {"api_name": "ffmpeg.file_extensions.VideoExtensions", "line_number": 156, "usage_type": "name"}, {"api_name": "ffmpeg.encoders.VideoEncoders", "line_number": 158, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 187, "usage_type": "name"}, {"api_name": "ffmpeg.file_extensions.VideoExtensions", "line_number": 187, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 189, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 212, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 221, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 229, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 231, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 232, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 233, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 238, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 240, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 257, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 257, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 261, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 264, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 264, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 265, "usage_type": "name"}, {"api_name": "ffmpeg.chapters.Chapter", "line_number": 265, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 287, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 294, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 298, "usage_type": "attribute"}, {"api_name": "tqdm.rich.tqdm_rich", "line_number": 311, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 336, "usage_type": "name"}, {"api_name": "ffmpeg.encoders.VideoEncoders", "line_number": 338, "usage_type": "name"}, {"api_name": "ffmpeg.encoders.VideoEncoders.LIBX265", "line_number": 338, "usage_type": "attribute"}, {"api_name": "ffmpeg.chapters.Chapter", "line_number": 381, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 386, "usage_type": "name"}, {"api_name": "ffmpeg.chapters.Chapter", "line_number": 391, "usage_type": "argument"}, {"api_name": "ffmpeg.chapters.Chapter", "line_number": 394, 
"usage_type": "call"}, {"api_name": "ffmpeg.chapters.Chapter", "line_number": 407, "usage_type": "argument"}, {"api_name": "pathlib.Path", "line_number": 412, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 445, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 445, "usage_type": "attribute"}]} +{"seq_id": "71013998718", "text": "#Exercise in which various goals need to be met using python version 3\n#Alec Webb\n\n#import statistics\nfrom collections import Counter\n\n#check if the argument n is prime (is only divisible by 1 and itself). The smallest prime is 2.\ndef is_prime(n):\n\t#prime numbers are greater than 1, and not divisible by anything but themselves\n\t#cycle through 2 to n-1 to try all possibilities.\n\tif n > 1:\n\t\tfor i in range(2,n):\n\t\t\tif(n % i) == 0:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\t\t\t\n#given a non-negative integer n, and optionally a dictionary results (whose keys are ints k and whose values are the result of fast_fib(k)), \n#calculate the nth fibonnaci number while memoizing the answers (don't use the pathologically slow recursive approach). \ndef fast_fib(n, results={}):\n\t#base conditions\n\tif n == 0: return 0\n\tif n == 1: return 1\n\t\n\t#recursion through result set\n\tif not n in results:\n\t\tresults[n] = fast_fib(n-1) + fast_fib(n-2)\n\treturn results[n]\n\n#given a list of values, create a new list with the same values in reversed order. Do not modify the original list.\ndef reversed(xs):\n\t#use extended slice to reverse the list\n\treturn xs[::-1]\n\n#return a list of all values that show up in xs at least once. The answer must not include any duplicates, \n#and must report the occurring values in the same order as their first occurrence.\t\ndef nub(xs):\n\t#self explanatory, creates empty list (nub), traverse given list\n\t#place items from xs that dont exist yet in nub, into nub\n\tnub = []\n\tfor item in xs:\n\t\tif item not in nub:\n\t\t\tnub.append(item)\n\n\treturn nub\n\n#given a two-argument function and two lists of arguments to supply, create a list of the results of applying \n#the function to each same-indexed pair of arguments from the two lists.\ndef zip_with(f, xs, ys):\n\t#in the event either list has no entries\n\tif len(xs) < 1 or len(ys) < 1:\n\t\tsolution = []\n\t#in the event list1 is larger than list2\n\tif len(xs) >= len(ys):\n\t\tsolution = list(map(f,xs[0:len(ys):1],ys))\n\t#in the event list2 is larger than list1\n\tif len(xs) <=len(ys):\n\t\tsolution = list(map(f,xs,ys[0:len(xs):1]))\n\n\treturn solution\n\t\n#given a number n, we generate successive integer values in a sequence, which must end with a 1.\n#if n is 1, the sequence ends.\n#if n is even, the next number is n/2.\n#if n is odd, the next number is n*3+1.\ndef collatz(n):\n\tcolist = []\n\t\n\twhile n != 1:\n\t\tcolist.append(n)\n\t\t#iseven\n\t\tif n%2 == 0:\n\t\t\tn = int(n/2)\n\t\t#isodd\n\t\telse:\n\t\t\tn = 3*n+1\n\t#all roads lead to 1\n\tcolist.append(1)\n\n\treturn colist\n\ndef max_or_mode(check, switch):\n\t#this function finds the maximum value of identical items when switch is 0\n\t#or the mode when switch is 1\n\tif switch == 0:\n\t\tcounter = Counter(check)\n\t\tcmax = max(counter.values())\n\t\treturn cmax \n\t\n\t#use counter, and list comprehension for multimodal lists\n\t#the counter counts the number of occurances of each number, creating in an i,j pair\n\t#the cmax variable finds the maximum occurance of one or more list entries\n\t#the mode uses a list comprehension to cycle 
through the i,j pairs to match\n\t#and places the number(i) with the (j) value equivalent to the cmax\n\tif switch == 1:\n\t\tcounter = Counter(check)\n\t\tcmax = max(counter.values())\n\t\tmode = [i for i, j in counter.items() if j == cmax]\n\t\treturn mode\n\n\t\t\n#Given the name of a text file that contains one or more lines, each with a single integer on that line, \n#calculate these three properties and return in a triplet: (mean, median, mode)\ndef file_report(filename):\n\twith open(filename) as f:\n\t\tnumbers = [num.strip('\\n') for num in f.readlines()]\n\n\t#convert from str to int\n\tnumbers = [int(n) for n in numbers]\n\n\t#use stat package if python version 3.4 or greater\n\t#mean = statistics.mean(numbers)\n\t#median = statistics.median(numbers)\n\t#otherwise must calculate ourselves\n\t\n\tmean = sum(numbers)/len(numbers)\n\tmode = max_or_mode(numbers, 1)\n\t\n\t#to get the median, sort the list\n\tnumbers.sort()\n\t#find the middle position using the floor division operator\n\tmiddle = len(numbers)//2\n\t#check if the list is even or odd, if odd take the middle number\n\tif len(numbers) % 2:\n\t\tmedian = numbers[middle]\n\t#if the list is even average the two numbers in the middle\n\telse:\n\t\tmedian = (numbers[middle] + numbers[middle-1]) / 2\n\n\treturn (mean, median, mode)\n\n\t\n#Given a 9x9 2d list, check if it represents a valid, solved sudoku. We aren't solving a sudoku, we're only checking someone's completed attempt.\t\ndef check_sudoku(grid):\n\t#to check the rows, pull and separate values, use the counter to check if the\n\t#each number has one occurance, this would indicate no duplication of values 0-9\n\tfor row in grid:\n\t\tcheck = row[:]\n\t\tif max_or_mode(check,0) != 1:\n\t\t\treturn False\n\n\t#to check for the columns, do the same thing but traverse the columns and place in \n\t#the list\n\tfor i in range(9):\n\t\tcheck = []\n\t\tfor j in range(9):\n\t\t\tcheck.append(grid[j][i])\n\t\tif max_or_mode(check,0) != 1:\n\t\t\treturn False\n\n\t#to check the 3x3 grids, the offset allows traversel through the component grids\n\t#apply the same count check.\n\tfor i in range(3):\n\t\tfor j in range(3):\n\t\t\tcheck = []\n\t\t\troffset = i*3\n\t\t\tcoffset = j*3\n\t\t\tfor x in range(3):\n\t\t\t\tfor y in range(3):\n\t\t\t\t\tcheck.append(grid[x + roffset][y + coffset])\n\t\t\tif max_or_mode(check,0) != 1:\n\t\t\t\treturn False\n\n\treturn True\n\n#DEBUG\n#grid = [[1,2,3, 4,5,6, 7,8,9],\n#\t\t [4,5,6, 7,8,9, 1,2,3],\n#\t\t [7,8,9, 1,2,3, 4,5,6],\n\n#\t\t [2,3,4, 5,6,7, 8,9,1],\n#\t\t [5,6,7, 8,9,1, 2,3,4],\n#\t\t [8,9,1, 2,3,4, 5,6,7],\n\n#\t\t [3,4,5, 6,7,8, 9,1,2],\n#\t\t [6,7,8, 9,1,2, 3,4,5],\n#\t\t [9,1,2, 3,4,5, 6,7,8],\n#\t\t]\n", "repo_name": "alecwebb/python", "sub_path": "puzzles.py", "file_name": "puzzles.py", "file_ext": "py", "file_size_in_byte": 5504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "collections.Counter", "line_number": 86, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "74123995839", "text": "from flask.ext.appbuilder.security.models import User, Role\nfrom app import db, appbuilder\nfrom app.models import *\n\ndef default_blog_config():\n try:\n admin_role = db.session.query(Role).filter_by(name=appbuilder.sm.auth_role_admin).first()\n admin = db.session.query(User)\\\n .filter(User.role == admin_role)\\\n .first()\n # We will always have a config named default,\n # to make sure 
we don't break anything important\n default_config = BlogConfig(\n name=\"default\",\n description=\"A good default configuration\",\n blog_title=\"Blog Title\",\n blog_subtitle=\"A blog subtitle\",\n window_title=\"A blog\",\n edit_lag_in_minutes=15,\n entries_in_sidebar=10,\n entries_per_page=5,\n entries_in_feed=5,\n comments_in_feed=100,\n show_all_tab=True,\n created_by=admin,\n changed_by=admin)\n db.session.add(default_config)\n db.session.flush()\n choose_config = ChooseConfig(chosen_config_id=default_config.id, lock=0,\n created_by=admin, changed_by=admin)\n db.session.add(choose_config)\n db.session.commit()\n except:\n db.session.rollback()\n\ndef default_categories():\n # A default category to avoid having zero categories\n try:\n default_category = Category(\n name=\"default\",\n slug=\"default\",\n description=\"A default category (don't add entries here)\",\n show=False,\n index=0)\n db.session.add(default_category)\n db.session.commit()\n except:\n db.session.rollback()\n\n if len(db.session.query(Category).all()) == 1:\n main_category = Category(\n name=\"Main\",\n slug=\"main\",\n description=\"Main category for the Blog\",\n show=True,\n index=1)\n about_category = Category(\n name=\"About\",\n slug=\"about\",\n description=\"'About' category for the blog\",\n show=True,\n index=2)\n contact_category = Category(\n name=\"Contact\",\n slug=\"contact\",\n description=\"'Contact' category for the blog\",\n show=True,\n index=3)\n\n db.session.add_all([main_category,\n about_category,\n contact_category])\n db.session.commit()\n\ndef initialize():\n default_blog_config()\n default_categories()\n", "repo_name": "cryptonomicon314/flog", "sub_path": "app/initialize.py", "file_name": "initialize.py", "file_ext": "py", "file_size_in_byte": 2556, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "97", "api": [{"api_name": "app.db.session.query", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.ext.appbuilder.security.models.Role", "line_number": 7, "usage_type": "argument"}, {"api_name": "app.db.session", "line_number": 7, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 7, "usage_type": "name"}, {"api_name": "app.appbuilder.sm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "app.appbuilder", "line_number": 7, "usage_type": "name"}, {"api_name": "app.db.session.query", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.ext.appbuilder.security.models.User", "line_number": 8, "usage_type": "argument"}, {"api_name": "app.db.session", "line_number": 8, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 8, "usage_type": "name"}, {"api_name": "flask.ext.appbuilder.security.models.User.role", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.ext.appbuilder.security.models.User", "line_number": 9, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 27, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 27, "usage_type": "name"}, {"api_name": "app.db.session.flush", "line_number": 28, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 28, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 31, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 31, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 31, 
"usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 32, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 32, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 32, "usage_type": "name"}, {"api_name": "app.db.session.rollback", "line_number": 34, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 34, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 45, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 45, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 45, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 46, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 46, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 46, "usage_type": "name"}, {"api_name": "app.db.session.rollback", "line_number": 48, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 48, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 48, "usage_type": "name"}, {"api_name": "app.db.session.query", "line_number": 50, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 50, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 50, "usage_type": "name"}, {"api_name": "app.db.session.add_all", "line_number": 70, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 70, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 70, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 73, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 73, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "34768040124", "text": "from typing import TypeVar, Optional, Callable, Generic\n\nComputedValue = TypeVar(\"ComputedValue\")\nComputedValueFactory = Callable[[], ComputedValue]\n\n\nclass ComputedField(Generic[ComputedValue]):\n _value: Optional[ComputedValue] = None\n _factory: ComputedValueFactory\n\n def __init__(self, factory: ComputedValueFactory):\n self._factory = factory\n\n @property\n def value(self) -> ComputedValue:\n if self._value is None:\n self._value = self._factory()\n\n return self._value\n\n def clear_cached_value(self):\n self._value = None\n", "repo_name": "Mr-Thack/grapejuice", "sub_path": "src/grapejuice_common/util/computed_field.py", "file_name": "computed_field.py", "file_ext": "py", "file_size_in_byte": 579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "typing.TypeVar", "line_number": 3, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 4, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "70380270080", "text": "from dataset.dataset import DataSet\nfrom settings import model_dir\nfrom model.xception import Xception\n\nimport tensorflow as tf\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\n\nclass Train:\n \n def __init__(self):\n self.ds = DataSet()\n\n def train(self, lr=1e-3, epochs = 12):\n X, Y = self.ds.load_dataset()\n \n print(\"dataset size {}\".format(X.shape))\n\n batch , num_features , num_channel = X.shape\n xception = Xception(num_classes=13, num_features=num_features)\n\n checkpoint_filepath = 
str(model_dir.joinpath('model.h5'))\n \n model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_filepath,\n monitor='acc',\n mode='max',\n verbose=1,\n save_best_only=True)\n\n \n model = xception.get_model()\n adam = tf.keras.optimizers.Adam(lr=lr)\n\n model.compile(optimizer=adam, loss='sparse_categorical_crossentropy',\n metrics=['accuracy']) \n\n history = model.fit(X, Y, epochs=epochs, \n callbacks=[model_checkpoint_callback],\n verbose=2)\n\n model_json = model.to_json()\n with open(str(model_dir.joinpath('model.json')), \"w\") as json_file:\n json_file.write(model_json)\n \n model.save_weights(str(model_dir.joinpath('model.h5')))\n print(\"Saved model to disk\")\n\n plt.plot(history.history['accuracy'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train'], loc='upper left')\n plt.savefig(str(model_dir.joinpath('acc.png')))\n plt.close()\n\n\n plt.plot(history.history['loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train'], loc='upper left')\n plt.savefig(str(model_dir.joinpath('loss.png')))\n", "repo_name": "EmersonAlvaro/genesis", "sub_path": "genesis/deepgenesis/train/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1929, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "dataset.dataset.DataSet", "line_number": 12, "usage_type": "call"}, {"api_name": "model.xception.Xception", "line_number": 20, "usage_type": "call"}, {"api_name": "settings.model_dir.joinpath", "line_number": 22, "usage_type": "call"}, {"api_name": "settings.model_dir", "line_number": 22, "usage_type": "name"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 24, "usage_type": "attribute"}, {"api_name": "model.xception", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 33, "usage_type": "attribute"}, {"api_name": "model.xception.compile", "line_number": 35, "usage_type": "call"}, {"api_name": "model.xception", "line_number": 35, "usage_type": "name"}, {"api_name": "model.xception.fit", "line_number": 38, "usage_type": "call"}, {"api_name": "model.xception", "line_number": 38, "usage_type": "name"}, {"api_name": "model.xception.to_json", "line_number": 42, "usage_type": "call"}, {"api_name": "model.xception", "line_number": 42, "usage_type": "name"}, {"api_name": "settings.model_dir.joinpath", "line_number": 43, "usage_type": "call"}, {"api_name": "settings.model_dir", "line_number": 43, "usage_type": "name"}, {"api_name": "model.xception.save_weights", "line_number": 46, "usage_type": "call"}, {"api_name": "model.xception", "line_number": 46, "usage_type": "name"}, {"api_name": "settings.model_dir.joinpath", "line_number": 46, "usage_type": "call"}, {"api_name": "settings.model_dir", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "settings.model_dir.joinpath", "line_number": 54, "usage_type": "call"}, {"api_name": "settings.model_dir", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "settings.model_dir.joinpath", "line_number": 63, "usage_type": "call"}, {"api_name": "settings.model_dir", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "29073067838", "text": "#!/usr/bin/env python3\n\n# Скрипт собирает:\n# - данные по загрузке ЦП, load_average за последнюю минуту и % загрузки\n# - данные по ОЗУ и swap: общее количество, свободно и доступно\n# - uptime в секундах\n# Данные складывает в формате json в лог за текущий день в папке /var/log\n\nfrom datetime import datetime\nimport json \nimport re\n\ncur_datetime = datetime.now()\ncur_date = cur_datetime.strftime(\"%Y-%m-%d\")\ncur_timestamp = cur_datetime.strftime(\"%s\")\nmetcirs_log = f\"/var/log/{cur_date}-awesome-monitoring.log\"\n\n\ndef cpu_info():\n\n cpu_info = {\n \"cpu_percent_load\": 0,\n \"load_average_minute\": 0,\n \"running_processes\": 0,\n \"total_processes\": 0\n }\n\n # Количество потоков процессора\n # /proc/cpuinfo\n with open('/proc/cpuinfo', 'r') as f:\n number_of_cpus = float(0)\n for string in f:\n counter = re.match(r'^processor[\\s]*\\:[\\s]*\\d', string)\n if counter:\n number_of_cpus += 1\n\n # Load Average\n # /proc/loadavg\n with open('/proc/loadavg', 'r') as f:\n for string in f:\n la_1m, la_5m, la_15m, proc_info, newest_pid = string.split(' ')\n running_processes, total_processes = proc_info.split('/')\n\n cpu_info['running_processes'] = int(running_processes)\n cpu_info['total_processes'] = int(total_processes)\n cpu_info['load_average_minute'] = float(la_1m)\n cpu_info['cpu_percent_load'] = \\\n int(float(cpu_info['load_average_minute'])/number_of_cpus*100)\n\n return cpu_info\n\n\ndef mem_info():\n mem_info = {\n \"mem_total_kb\": 0,\n \"mem_free_kb\": 0,\n \"mem_available_kb\":0,\n \"swap_total_kb\": 0,\n 
\"swap_free_kb\": 0\n }\n # cat /proc/meminfo\n with open('/proc/meminfo', 'r') as f:\n for string in f:\n mem_regexp = re.match(r'^([\\w\\(\\)\\_]*):\\s*(\\d*)\\skB', string)\n if mem_regexp and mem_regexp.group(1) == 'MemTotal':\n mem_info['mem_total_kb'] = int(mem_regexp.group(2))\n elif mem_regexp and mem_regexp.group(1) == 'MemFree':\n mem_info['mem_free_kb'] = int(mem_regexp.group(2))\n elif mem_regexp and mem_regexp.group(1) == 'MemAvailable':\n mem_info['mem_available_kb'] = int(mem_regexp.group(2))\n elif mem_regexp and mem_regexp.group(1) == 'SwapTotal':\n mem_info['swap_total_kb'] = int(mem_regexp.group(2))\n elif mem_regexp and mem_regexp.group(1) == 'SwapFree':\n mem_info['swap_free_kb'] = int(mem_regexp.group(2))\n return mem_info\n\n\n\ndef uptime_info():\n # \"/proc/uptime\"\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = int(float(f.read().split()[0]))\n return uptime_seconds\n\n\ndef main():\n\n # Записать таймстемп\n export_data = {\"timestamp\": cur_timestamp}\n # Добавить данные по системе\n export_data = {\n **export_data, \n **cpu_info(), \n **mem_info(), \n \"uptime_seconds\": uptime_info()\n }\n\n # Записываем в файл\n with open(metcirs_log, 'a') as f:\n f.write(json.dumps(export_data))\n f.write('\\n')\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "run0ut/devops-netology", "sub_path": "03-mnt-homeworks/10-monitoring-01-base/media/gather_metrics.py", "file_name": "gather_metrics.py", "file_ext": "py", "file_size_in_byte": 3385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "re.match", "line_number": 33, "usage_type": "call"}, {"api_name": "re.match", "line_number": 64, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "41410591625", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2019-20: Homework 5\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nclass CNN(nn.Module):\n \"\"\"\n a cnn networ that's used to encode the character embeddings and calculate the word embeddings from it\n\n \"\"\"\n ### YOUR CODE HERE for part 1g\n def __init__(self , word_embed_size, char_embed_size ):\n\n \"\"\"\n a cnn networ that's used to encode the character embeddings and calculate the word embeddings from it\n\n args:\n word_embed_size : the size of the word embeddings\n char_embed_size : the size of the char embeddings\n \"\"\"\n\n super(CNN, self).__init__()\n\n self.word_embed_size = word_embed_size\n self.char_embed_size = char_embed_size\n self.conv1 = nn.Conv1d(char_embed_size , word_embed_size , kernel_size = 5 , padding = 1)\n\n\n def forward(self , sents_char_embedded):\n\n \"\"\"\n calculate the word embeddings out of a char embeddings input\n\n args:\n sents_char_embedded: an input tensor of shape (max_seq_len , batch_size , max_word_len , char_embed)\n\n returns:\n xconv: the output tensor of shape (max_seq_len , batch_size , word_embedding_size)\n\n \"\"\"\n\n max_seq_len = sents_char_embedded.shape[0]\n batch_size = sents_char_embedded.shape[1]\n max_word_len = sents_char_embedded.shape[2]\n\n \n\n #reshaping the input to prepare it for the convNet\n sents_char_embedded = sents_char_embedded.contiguous().reshape((max_seq_len * batch_size , max_word_len , self.char_embed_size)).transpose(1,2) # (max_seq_len * batch_size , 
char_embed , max_word_len)\n\n xconv = nn.functional.relu(self.conv1(sents_char_embedded))# (max_seq_len * batch_size , word_embed , max_word_len-2)\n\n xconv = torch.max( xconv , dim = 2)[0] # (max_seq_len * batch_size , word_embed )\n\n xconv = xconv.contiguous().reshape((max_seq_len , batch_size , self.word_embed_size)) #shape (max_seq_len , batch_size , word_embedding_size)\n\n\n return xconv\n\n\n ### END YOUR CODE\n", "repo_name": "mohieEldeen/Neural-Machine-Translator-NMT-", "sub_path": "Improved NMT/cnn.py", "file_name": "cnn.py", "file_ext": "py", "file_size_in_byte": 2099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "8490126835", "text": "import os.path\n\n# Import pytest.\nimport pytest\n\n# Import Tkinter canvas postscript utilities.\nfrom . import tkinter_canvas_postscript\n\n# Import tkinter_shapes.\nimport tkinter_shapes\n\n# Import tkinter_spheres_environment_gui\nimport tkinter_spheres_environment_gui\n\n\n# Initialize a reference GUI\n@pytest.fixture\ndef reference_environment_gui():\n \n # Initialize a GUI with a spheres_environment interface.\n environment_gui = tkinter_spheres_environment_gui.Environment()\n \n # Initialize a rectangular, black polygon, with the same size as the \n # canvas. This can be necessary to capture the black background, which is \n # important when the saved test images are used for other purposes (e.g., \n # documentation). Otherwise, the saved images will not match the canvas as \n # it appears to the user.\n canvas = environment_gui.gui.canvas\n (w, h) = canvas.dimensions\n vertices = [(0, 0), (0, h), (w, h), (w, 0)]\n rectangle = tkinter_shapes.Polygon(canvas=canvas, vertices=vertices)\n rectangle['fill'] = rectangle['outline'] = 'black'\n \n # Update the GUI / canvas.\n environment_gui.update()\n \n # Yield the fixture product.\n yield environment_gui\n \n # Cleanup the fixture product.\n del rectangle\n del environment_gui\n \n \n\n# Initialize a path to an image file repository.\n@pytest.fixture\ndef images_basepath(): return 'data/images'\n \n \n\n# Initialize a generator that yields postscript data for reference images \n# extracted from the GUI canvas following successive manipulations.\n# This is NOT A PYTEST FIXTURE.\ndef reference_image_generator(reference_environment_gui):\n \"\"\" Generator of postscript data for reference images representing a \n `spheres_environment` GUI canvas as it is sequentially modified. 
\"\"\"\n \n # Initialize parameters.\n key_a = 'object_a'\n key_b = 'object_b'\n \n # Initialize shorthand.\n canvas = reference_environment_gui.gui.canvas\n extract = tkinter_canvas_postscript.extract\n \n # Image 0: baseline.\n canvas.update()\n yield extract(canvas)\n \n # Image 1: Initialize a new object and update the color property.\n obj_a = reference_environment_gui.initialize_object(key_a)\n obj_a.color = (0.0, 0.0, 1.0, 1.0)\n canvas.update()\n yield extract(canvas)\n \n # Image 2: Re-size the object.\n #obj_a.width = 0.10\n obj_a.radius = 0.20\n canvas.update()\n yield extract(canvas)\n \n # Image 3: Re-position the object.\n obj_a.position = (0.50, -0.55, 1.00)\n canvas.update()\n yield extract(canvas)\n \n # Image 4: Initialize a second object and update the color, position, \n # and size properties.\n obj_b = reference_environment_gui.initialize_object(key_b)\n obj_b.color = (0.0, 1.0, 0.0, 1.0)\n obj_b.position = (-0.25, 0.25, 0.00)\n #obj_b.width = 0.10\n obj_b.radius = 0.10\n canvas.update()\n yield extract(canvas)\n \n # Image 5: Move the second object to overlap with the first.\n obj_b.position = (0.35, -0.35, 1.0)\n canvas.update()\n yield extract(canvas)\n \n \n\n# Initialize an array of postscript data for reference images extracted from \n# the GUI canvas.\n@pytest.fixture\ndef reference_image_sequence(reference_environment_gui):\n \"\"\" Images -- represented as postscript data -- of a spheres_environment \n GUI canvas, as it is sequentially modified. \"\"\"\n \n # Initialize parameters.\n generator = reference_image_generator(reference_environment_gui)\n yield tuple(ps for ps in generator)\n \n # Clean up.\n pass\n \n \n\n# __main__\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n \n \n\n\n", "repo_name": "ricmua/tkinter_spheres_environment_gui", "sub_path": "test/fixtures.py", "file_name": "fixtures.py", "file_ext": "py", "file_size_in_byte": 3661, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "tkinter_spheres_environment_gui.Environment", "line_number": 21, "usage_type": "call"}, {"api_name": "tkinter_shapes.Polygon", "line_number": 31, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 107, "usage_type": "attribute"}, {"api_name": "doctest.testmod", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "31447549849", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport random\nimport json\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.sites.models import get_current_site\n\nfrom opps.images.models import Image\nfrom opps.containers.models import Container, ContainerImage\nfrom opps.images.generate import image_url\n\n\ndef response_mimetype(request):\n if \"application/json\" in request.META['HTTP_ACCEPT']:\n return \"application/json\"\n return \"text/plain\"\n\n\n@csrf_exempt\n@login_required(login_url='/admin/')\ndef image_create(request, container_pk=None):\n\n if container_pk:\n container = get_object_or_404(Container, pk=int(container_pk))\n title = container.title\n site = container.site\n user = request.user or 
container.user\n date_available = container.date_available\n else:\n container = None\n title = 'Uploaded Image'\n site = get_current_site(request)\n user = request.user\n date_available = timezone.now()\n\n if request.method == \"POST\":\n f = request.FILES.get('image')\n\n title = request.POST.get('title') or container.title\n caption = request.POST.get('caption', '')\n source = request.POST.get('source', '')\n tags = request.POST.get('tags', '')\n slug = slugify(title)\n slug = \"{0}-{1}\".format(slug[:100], random.getrandbits(32))\n\n instance = Image(\n site=site,\n user=user,\n date_available=date_available,\n title=title,\n slug=slug,\n archive=f,\n source=source,\n published=True,\n tags=tags,\n description=caption\n )\n instance.save()\n\n if container:\n order = request.POST.get('order', 0)\n ContainerImage.objects.create(\n container=container,\n image=instance,\n caption=caption,\n order=int(order)\n )\n\n data = [{'name': f.name,\n 'url': \"%s\" % instance.archive.url,\n 'thumbnail_url': \"%s\" % image_url(\n instance.archive.url,\n width=60,\n height=60\n ),\n \"delete_url\": \"\",\n \"delete_type\": \"DELETE\"}]\n response = JSONResponse(data, {}, response_mimetype(request))\n response['Content-Disposition'] = 'inline; filename=files.json'\n return response\n else:\n return render(request, 'fileupload/image_form.html',\n {'container': container})\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"JSON response class.\"\"\"\n def __init__(self, obj='', json_opts={}, mimetype=\"application/json\",\n *args, **kwargs):\n content = json.dumps(obj, **json_opts)\n super(JSONResponse, self).__init__(content, mimetype, *args, **kwargs)\n", "repo_name": "opps/opps", "sub_path": "opps/contrib/fileupload/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 313, "dataset": "github-code", "pt": "97", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 30, "usage_type": "call"}, {"api_name": "opps.containers.models.Container", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.contrib.sites.models.get_current_site", "line_number": 38, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 40, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 40, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 49, "usage_type": "call"}, {"api_name": "random.getrandbits", "line_number": 50, "usage_type": "call"}, {"api_name": "opps.images.models.Image", "line_number": 52, "usage_type": "call"}, {"api_name": "opps.containers.models.ContainerImage.objects.create", "line_number": 68, "usage_type": "call"}, {"api_name": "opps.containers.models.ContainerImage.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "opps.containers.models.ContainerImage", "line_number": 68, "usage_type": "name"}, {"api_name": "opps.images.generate.image_url", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 88, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 26, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 92, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "18926285807", "text": "\"\"\"\na config file to store all the 
necessary variables\n\"\"\"\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nis_colorful = True\nif is_colorful:\n no_channels = 3\nelse:\n no_channels = 1\n\nimg_config = {\n 'img_height': 100, \n 'img_width':100, \n 'no_channels': no_channels,\n 'no_img': 2000\n }\n\nmodel_config = {\n 'no_epochs':30,\n 'test_size':0.20,\n 'ffn_layer_1':10,\n 'ffn_layer_2':10,\n 'filter_CNN':4,\n 'kernel_size':3,\n 'batch_size':128\n}\n\ndef split_data(images, labels):\n \"\"\"\n method to split data into training and testing data set\n\n :param images: np.ndarray, a numpy array of all the images\n :param labels: list, a list of all the labels\n :return [X_train, X_test, y_train, y_test]: list, a list of randomly shuffled training \n and testing data set\n \"\"\"\n X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=model_config['test_size'], random_state=42)\n y_train, y_test = np.array(y_train), np.array(y_test)\n return X_train, X_test, y_train, y_test\n", "repo_name": "kjosh10/Circle-Detector-from-a-Synthetic-Data", "sub_path": "config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "sklearn.model_selection.train_test_split", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "74837613757", "text": "import argparse\n\nfrom Embedding_Preprocessing.encoder_tuple_level import TupleTokenizationMode\nfrom tangent_cft_back_end import TangentCFTBackEnd\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Given the configuration file for training Tangent_CFT model.'\n 'This function train the model and then does the retrieval task on'\n 'NTCIR-12 formula retrieval task.')\n\n parser.add_argument('--t', type=bool, help=\"Value True for training a new model and False for loading a model\",\n default=True)\n parser.add_argument('--r', type=bool, help=\"Value True to do the retrieval on NTCIR12 dataset\",\n default=True)\n parser.add_argument('-ds', type=str, help=\"File path of training data. If using NTCIR12 dataset, \"\n \"it should be MathTagArticles directory. 
If using the MSE dataset, it\"\n \"should be csv file of formula\", required=True)\n parser.add_argument('-cid', metavar='cid', type=int, help='Configuration file.', required=True)\n parser.add_argument('--wiki', type=bool, help=\"Determines if the dataset is wiki or not.\", default=True)\n parser.add_argument('--slt', type=bool, help=\"Determines to use slt (True) or opt(False)\", default=True)\n parser.add_argument('-em', type=str, help=\"File path for encoder map.\", required=True)\n parser.add_argument('--mp', type=str, help=\"Model file path.\", default=None)\n parser.add_argument('--qd', type=str, help=\"NTCIR12 query directory.\", default=None)\n parser.add_argument('--rf', type=str, help=\"Retrieval result file path.\", default=\"ret_res\")\n parser.add_argument('--ri', type=int, help=\"Run Id for Retrieval.\", default=1)\n parser.add_argument('--frp', type=bool, help=\"Determines to ignore full relative path\", default=True)\n parser.add_argument('--ta', type=bool, help=\"Determines to tokenize all\", default=False)\n parser.add_argument('--tn', type=bool, help=\"Determines to tokenize numbers\", default=True)\n parser.add_argument('--et', help='Embedding type; 1:Value, 2:Type, 3:Type and Value separated and'\n ' 4: Type and Value Not Separated', choices=range(1, 5),\n default=3, type=int)\n\n args = vars(parser.parse_args())\n\n train_model = args['t']\n do_retrieval = args['r']\n dataset_file_path = args['ds']\n config_id = args['cid']\n is_wiki = args['wiki']\n read_slt = args['slt']\n encoder_file_path = args['em']\n model_file_path = args['mp']\n res_file = args['rf']\n run_id = args['ri']\n ignore_full_relative_path = args['frp']\n tokenize_all = args['ta']\n tokenize_number = args['tn']\n queries_directory_path = args['qd']\n embedding_type = TupleTokenizationMode(args['et'])\n map_file_path = \"Embedding_Preprocessing/\" + str(encoder_file_path)\n config_file_path = \"Configuration/config/config_\" + str(config_id)\n\n system = TangentCFTBackEnd(config_file=config_file_path, path_data_set=dataset_file_path, is_wiki=is_wiki,\n read_slt=read_slt, queries_directory_path=queries_directory_path)\n if train_model:\n dictionary_formula_tuples_collection = system.train_model(\n map_file_path=map_file_path,\n model_file_path=model_file_path,\n embedding_type=embedding_type, ignore_full_relative_path=ignore_full_relative_path,\n tokenize_all=tokenize_all,\n tokenize_number=tokenize_number\n )\n if do_retrieval:\n retrieval_result = system.retrieval(dictionary_formula_tuples_collection,\n embedding_type, ignore_full_relative_path, tokenize_all,\n tokenize_number\n )\n system.create_result_file(retrieval_result, \"Retrieval_Results/\" + res_file, run_id)\n else:\n\n dictionary_formula_tuples_collection = system.load_model(\n map_file_path=map_file_path,\n model_file_path=model_file_path,\n embedding_type=embedding_type, ignore_full_relative_path=ignore_full_relative_path,\n tokenize_all=tokenize_all,\n tokenize_number=tokenize_number\n )\n if do_retrieval:\n retrieval_result = system.retrieval(dictionary_formula_tuples_collection,\n embedding_type, ignore_full_relative_path, tokenize_all,\n tokenize_number\n )\n system.create_result_file(retrieval_result, \"Retrieval_Results/\" + res_file, run_id)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "BehroozMansouri/TangentCFT", "sub_path": "tangent_cft_front_end.py", "file_name": "tangent_cft_front_end.py", "file_ext": "py", "file_size_in_byte": 4870, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 39, 
"dataset": "github-code", "pt": "97", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "Embedding_Preprocessing.encoder_tuple_level.TupleTokenizationMode", "line_number": 50, "usage_type": "call"}, {"api_name": "tangent_cft_back_end.TangentCFTBackEnd", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "41468259841", "text": "# -*- coding: utf-8 -*-\nfrom urllib import parse\n\nimport scrapy\nfrom scrapy import Request\n\nfrom myspider.items import MyspiderItem\n\n\nclass DoubanspiderSpider(scrapy.Spider):\n name = 'doubanSpider'\n allowed_domains = ['douban.com']\n start_urls = ['https://movie.douban.com/top250']\n\n def parse(self, response):\n # 找出页面中每个电影的 url\n subjects_nodes = response.css(\".item\")\n for subject in subjects_nodes:\n # 封面图片的 url\n img_url = subject.css(\"div > a > img::attr(src)\").extract_first()\n subject_url = subject.css(\"div > a::attr(href)\").extract_first()\n # 通过 meta 传递下去\n yield Request(url=parse.urljoin(response.url, subject_url), meta={\"front_image_url\": img_url},\n callback=self.parse_detail)\n\n # 找到下一页 url, 递归调用\n next_url = response.css(\".next > a::attr(href)\").extract_first()\n if next_url:\n yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)\n\n @staticmethod\n def parse_detail(response):\n\n # 通过 CSS 选择器找出具体值\n title = response.css(\"h1>span:nth-child(1)::text\").extract_first()\n rating = response.css('.rating_num::text').extract_first()\n \n # 实例化对象\n subject_item = MyspiderItem()\n subject_item['title'] = title\n subject_item['douban_link'] = response.url\n subject_item['rating'] = rating\n\n # 移交 pipeline 流水线 \n yield subject_item", "repo_name": "imzhizi/myspider", "sub_path": "myspider/spiders/doubanSpider.py", "file_name": "doubanSpider.py", "file_ext": "py", "file_size_in_byte": 1578, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "scrapy.Spider", "line_number": 10, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 23, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 29, "usage_type": "name"}, {"api_name": "myspider.items.MyspiderItem", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "32601670093", "text": "from frontend import app, db, bcrypt\nfrom flask import render_template, flash, redirect, url_for\nfrom frontend.forms import RegistrationForm, LoginForm, ProjectForm, MonitorForm\nfrom frontend.models import User, Project, Device\nfrom flask_login import login_user, current_user, logout_user\nimport yaml\nimport json\n\nfrom nornir import InitNornir\nfrom constants import config_file\nfrom nafc.api.show import get_config\n\nnr = InitNornir(config_file=f\"{config_file}\")\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/\")\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for(\"project\"))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode(\n \"utf-8\"\n )\n user = User(\n username=form.username.data, 
email=form.email.data, password=hashed_password\n )\n db.session.add(user)\n db.session.commit()\n flash(\"Your account has been created! You are now able to login!\", \"success\")\n return redirect(url_for(\"login\"))\n return render_template(\"register.html\", title=\"Register\", form=form)\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for(\"project\"))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n return redirect(url_for(\"project\"))\n else:\n flash(\"Login Unsuccessful. Please check Username and Password!\", \"danger\")\n return render_template(\"login.html\", title=\"Login\", form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/project\")\ndef project():\n return render_template(\"project.html\", projects=current_user.projects)\n\n\n@app.route(\"/add_project\", methods=[\"GET\", \"POST\"])\ndef add_project():\n form = ProjectForm()\n if form.validate_on_submit():\n new_project = Project(\n name=form.name.data,\n description=form.description.data,\n config_file=form.configFile.data,\n user_id=current_user.id,\n inventory_file=form.inventoryFile.data,\n )\n db.session.add(new_project)\n db.session.commit()\n # flash('New project has been created!', 'success')\n with open(form.inventoryFile.data, \"r\") as f:\n device_list = yaml.full_load(f)\n for hostname, values in device_list.items():\n device = Device(\n name=hostname,\n project_id=new_project.id,\n management_ip=values[\"hostname\"],\n )\n db.session.add(device)\n db.session.commit()\n return redirect(url_for(\"project\"))\n return render_template(\"add_project.html\", title=\"Add Project\", form=form)\n\n\n@app.route(\"/project/<project_id>\", methods=[\"GET\"])\ndef project_info(project_id):\n \"\"\"\n This page shows detailed stats on an individual project\n queried by project id\n \"\"\"\n multi_result = {}\n result2 = nr.run(task=get_config, command=\"sh version\")\n for device, device_result in result2.items():\n structured_device_result = device_result[\n 1\n ].scrapli_response.genie_parse_output()\n multi_result[device] = structured_device_result\n # print(multi_result)\n\n return render_template(\n \"project_detail.html\",\n project=Project.query.filter_by(id=project_id).first(),\n result=multi_result,\n )\n\n\n@app.route(\"/project/<project_id>/monitor\", methods=[\"GET\", \"POST\"])\ndef project_monitor(project_id):\n \"\"\"\n This page shows detailed stats on an individual project\n queried by project id\n \"\"\"\n form = MonitorForm()\n project = Project.query.filter_by(id=project_id).first()\n form.device.choices = [device.name for device in project.devices]\n form.command.choices = [\n \"sh version\",\n \"sh interfaces\",\n \"sh clock\",\n \"sh ntp associations\",\n \"sh ntp status\",\n \"sh int description\",\n \"sh ip int brief\",\n \"sh ip protocol\",\n \"sh ip route\",\n \"sh arp\",\n \"sh ip eigrp int\",\n \"sh ip eigrp nei\",\n \"sh ip ospf int bri\",\n \"sh ip ospf nei\",\n \"sh ip bgp sum\",\n \"sh ip bgp\",\n \"sh ip bgp nei\",\n \"sh ip bgp rib-failure\",\n \"sh ip cef\",\n \"sh ip cef inconsistency\",\n \"sh standby bri\",\n \"sh vrrp bri\",\n \"sh vrrp all\",\n \"sh ip nhrp\", \n \"sh dmvpn detail\",\n \"sh crypto session\",\n \"sh crypto isakmp sa\",\n \"sh crypto ikev2 
sa\",\n \"sh crypto ipsec sa\",\n \"sh bfd sum\",\n \"sh bfd nei\",\n \"sh ip policy\",\n \"sh ip local policy\",\n \"sh ip cache policy\",\n \"sh policy-map\",\n \"sh route-map\",\n \"sh access-lists\",\n \"sh track bri\",\n \"sh track\",\n \"sh process cpu sorted\",\n \"sh process memory sorted\",\n \"sh boot\",\n \"sh bootvar\",\n \"sh inv\",\n \"dir all-filesystems\",\n \"sh users all\",\n \"sh line\",\n \"sh log\"\n ]\n device_name = form.device.data\n command = form.command.data\n if device_name:\n result = None\n nr2 = nr.filter(name=f\"{device_name}\")\n result = nr2.run(task=get_config, command=command)\n # breakpoint()\n # print(result)\n result_json = result[device_name][1].scrapli_response.genie_parse_output()\n\n return render_template(\n \"monitor_project.html\",\n project=project,\n result_raw=result[device_name][1].result,\n result_json=json.dumps(result_json, indent=2),\n form=form,\n )\n else:\n return render_template(\n \"monitor_project.html\",\n project=project,\n form=form,\n )\n\n\n@app.route(\"/project//configure\", methods=[\"GET\", \"POST\"])\ndef configure(project_id):\n return render_template(\n \"configure_project.html\",\n title=\"Configure Project\",\n project=Project.query.filter_by(id=project_id).first(),\n )\n\n\n@app.route(\"/project//configure/\", methods=[\"GET\", \"POST\"])\ndef current_configure(project_id, device):\n result = nr.run(task=get_config, command=\"sh config\")\n x = result[device][1].result.splitlines()\n # with open(f'{device}.txt', 'w') as a:\n # a.write(result[device][1].result)\n x2 = result[device][1].result\n # print(x)\n return render_template(\n \"configure_project.html\",\n title=\"Configure Project\",\n project=Project.query.filter_by(id=project_id).first(),\n device=Device.query.filter_by(project_id=project_id, name=device).first(),\n result=x,\n result2=x2,\n )\n", "repo_name": "kimdoanh89/fastcli", "sub_path": "frontend/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 7070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "97", "api": [{"api_name": "nornir.InitNornir", "line_number": 13, "usage_type": "call"}, {"api_name": "constants.config_file", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "frontend.app.route", "line_number": 16, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 16, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 25, "usage_type": "call"}, {"api_name": "frontend.forms.RegistrationForm", "line_number": 26, "usage_type": "call"}, {"api_name": "frontend.bcrypt.generate_password_hash", "line_number": 28, "usage_type": "call"}, {"api_name": "frontend.bcrypt", "line_number": 28, "usage_type": "name"}, {"api_name": "frontend.models.User", "line_number": 31, "usage_type": "call"}, {"api_name": "frontend.db.session.add", "line_number": 34, "usage_type": "call"}, {"api_name": "frontend.db.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "frontend.db", "line_number": 34, "usage_type": "name"}, {"api_name": "frontend.db.session.commit", "line_number": 35, "usage_type": "call"}, {"api_name": "frontend.db.session", "line_number": 35, 
"usage_type": "attribute"}, {"api_name": "frontend.db", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "frontend.app.route", "line_number": 21, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 21, "usage_type": "name"}, {"api_name": "frontend.app.route", "line_number": 22, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 22, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 44, "usage_type": "call"}, {"api_name": "frontend.forms.LoginForm", "line_number": 45, "usage_type": "call"}, {"api_name": "frontend.models.User.query.filter_by", "line_number": 47, "usage_type": "call"}, {"api_name": "frontend.models.User.query", "line_number": 47, "usage_type": "attribute"}, {"api_name": "frontend.models.User", "line_number": 47, "usage_type": "name"}, {"api_name": "frontend.bcrypt.check_password_hash", "line_number": 48, "usage_type": "call"}, {"api_name": "frontend.bcrypt", "line_number": 48, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 53, "usage_type": "call"}, {"api_name": "frontend.app.route", "line_number": 41, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 41, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 59, "usage_type": "call"}, {"api_name": "frontend.app.route", "line_number": 56, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 64, "usage_type": "call"}, {"api_name": "flask_login.current_user.projects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 64, "usage_type": "name"}, {"api_name": "frontend.app.route", "line_number": 62, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 62, "usage_type": "name"}, {"api_name": "frontend.forms.ProjectForm", "line_number": 69, "usage_type": "call"}, {"api_name": "frontend.models.Project", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_login.current_user.id", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 75, "usage_type": "name"}, {"api_name": "frontend.db.session.add", "line_number": 78, "usage_type": "call"}, {"api_name": "frontend.db.session", "line_number": 78, "usage_type": "attribute"}, {"api_name": "frontend.db", "line_number": 78, "usage_type": "name"}, {"api_name": "frontend.db.session.commit", "line_number": 79, "usage_type": "call"}, {"api_name": "frontend.db.session", 
"line_number": 79, "usage_type": "attribute"}, {"api_name": "frontend.db", "line_number": 79, "usage_type": "name"}, {"api_name": "yaml.full_load", "line_number": 82, "usage_type": "call"}, {"api_name": "frontend.models.Device", "line_number": 84, "usage_type": "call"}, {"api_name": "frontend.db.session.add", "line_number": 89, "usage_type": "call"}, {"api_name": "frontend.db.session", "line_number": 89, "usage_type": "attribute"}, {"api_name": "frontend.db", "line_number": 89, "usage_type": "name"}, {"api_name": "frontend.db.session.commit", "line_number": 90, "usage_type": "call"}, {"api_name": "frontend.db.session", "line_number": 90, "usage_type": "attribute"}, {"api_name": "frontend.db", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 92, "usage_type": "call"}, {"api_name": "frontend.app.route", "line_number": 67, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 67, "usage_type": "name"}, {"api_name": "nafc.api.show.get_config", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 110, "usage_type": "call"}, {"api_name": "frontend.models.Project.query.filter_by", "line_number": 112, "usage_type": "call"}, {"api_name": "frontend.models.Project.query", "line_number": 112, "usage_type": "attribute"}, {"api_name": "frontend.models.Project", "line_number": 112, "usage_type": "name"}, {"api_name": "frontend.app.route", "line_number": 95, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 95, "usage_type": "name"}, {"api_name": "frontend.forms.MonitorForm", "line_number": 123, "usage_type": "call"}, {"api_name": "frontend.models.Project.query.filter_by", "line_number": 124, "usage_type": "call"}, {"api_name": "frontend.models.Project.query", "line_number": 124, "usage_type": "attribute"}, {"api_name": "frontend.models.Project", "line_number": 124, "usage_type": "name"}, {"api_name": "nafc.api.show.get_config", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 186, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 194, "usage_type": "call"}, {"api_name": "frontend.app.route", "line_number": 117, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 203, "usage_type": "call"}, {"api_name": "frontend.models.Project.query.filter_by", "line_number": 206, "usage_type": "call"}, {"api_name": "frontend.models.Project.query", "line_number": 206, "usage_type": "attribute"}, {"api_name": "frontend.models.Project", "line_number": 206, "usage_type": "name"}, {"api_name": "frontend.app.route", "line_number": 201, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 201, "usage_type": "name"}, {"api_name": "nafc.api.show.get_config", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 218, "usage_type": "call"}, {"api_name": "frontend.models.Project.query.filter_by", "line_number": 221, "usage_type": "call"}, {"api_name": "frontend.models.Project.query", "line_number": 221, "usage_type": "attribute"}, {"api_name": "frontend.models.Project", "line_number": 221, "usage_type": "name"}, {"api_name": "frontend.models.Device.query.filter_by", "line_number": 
222, "usage_type": "call"}, {"api_name": "frontend.models.Device.query", "line_number": 222, "usage_type": "attribute"}, {"api_name": "frontend.models.Device", "line_number": 222, "usage_type": "name"}, {"api_name": "frontend.app.route", "line_number": 210, "usage_type": "call"}, {"api_name": "frontend.app", "line_number": 210, "usage_type": "name"}]} +{"seq_id": "6245843587", "text": "#!/usr/bin/env python\nimport roslib; roslib.load_manifest('xsens_driver')\nimport rospy\nimport select\n\nimport mtdevice\n\nfrom std_msgs.msg import Header, Float32\nfrom sensor_msgs.msg import Imu, NavSatFix, NavSatStatus\nfrom geometry_msgs.msg import TwistStamped, Vector3Stamped\nfrom gps_common.msg import GPSFix, GPSStatus\nfrom diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue\n\n\n# transform Euler angles or matrix into quaternions\nfrom math import pi, radians\nfrom tf.transformations import quaternion_from_matrix, quaternion_from_euler, identity_matrix\n\ndef get_param(name, default):\n\ttry:\n\t\tv = rospy.get_param(name)\n\t\trospy.loginfo(\"Found parameter: %s, value: %s\"%(name, str(v)))\n\texcept KeyError:\n\t\tv = default\n\t\trospy.logwarn(\"Cannot find value for parameter: %s, assigning \"\n\t\t\t\t\"default: %s\"%(name, str(v)))\n\treturn v\n\nclass XSensDriver(object):\n\t\n\tdef __init__(self):\n\t\t\n\t\tdevice = get_param('~device', 'auto')\n\t\tbaudrate = get_param('~baudrate', 0)\n\t\tif device=='auto':\n\t\t\tdevs = mtdevice.find_devices()\n\t\t\tif devs:\n\t\t\t\tdevice, baudrate = devs[0]\n\t\t\t\trospy.loginfo(\"Detected MT device on port %s @ %d bps\"%(device,\n\t\t\t\t\t\tbaudrate))\n\t\t\telse:\n\t\t\t\trospy.logerr(\"Fatal: could not find proper MT device.\")\n\t\t\t\trospy.signal_shutdown(\"Could not find proper MT device.\")\n\t\t\t\treturn\n\t\tif not baudrate:\n\t\t\tbaudrate = mtdevice.find_baudrate(device)\n\t\tif not baudrate:\n\t\t\trospy.logerr(\"Fatal: could not find proper baudrate.\")\n\t\t\trospy.signal_shutdown(\"Could not find proper baudrate.\")\n\t\t\treturn\n\n\t\trospy.loginfo(\"MT node interface: %s at %d bd.\"%(device, baudrate))\n\t\tself.mt = mtdevice.MTDevice(device, baudrate)\n\n\t\tself.frame_id = get_param('~frame_id', '/base_imu')\n\t\t\n\t\tself.diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray)\n\t\tself.diag_msg = DiagnosticArray()\n\t\tself.stest_stat = DiagnosticStatus(name='mtnode: Self Test', level=1,\n\t\t\t\tmessage='No status information')\n\t\tself.xkf_stat = DiagnosticStatus(name='mtnode: XKF Valid', level=1,\n\t\t\t\tmessage='No status information')\n\t\tself.gps_stat = DiagnosticStatus(name='mtnode: GPS Fix', level=1,\n\t\t\t\tmessage='No status information')\n\t\tself.diag_msg.status = [self.stest_stat, self.xkf_stat, self.gps_stat]\n\n\t\tself.imu_pub = rospy.Publisher('imu/data', Imu)\n\t\tself.gps_pub = rospy.Publisher('fix', NavSatFix)\n\t\tself.xgps_pub = rospy.Publisher('fix_extended', GPSFix)\n\t\tself.vel_pub = rospy.Publisher('velocity', TwistStamped)\n\t\tself.mag_pub = rospy.Publisher('magnetic', Vector3Stamped)\n\t\tself.temp_pub = rospy.Publisher('temperature', Float32)\t# decide type\n\t\t# TODO pressure, ITOW from raw GPS?\n\t\tself.old_bGPS = 256\t# publish GPS only if new\n\n\n\n\tdef spin(self):\n\t\ttry:\n\t\t\twhile not rospy.is_shutdown():\n\t\t\t\tself.spin_once()\n\t\t# Ctrl-C signal interferes with select with the ROS signal handler\n\t\t# should be OSError in python 3.?\n\t\texcept select.error:\n\t\t\tpass\n\n\tdef spin_once(self):\n\n\t\tdef 
quat_from_orient(orient):\n\t\t\t'''Build a quaternion from orientation data.'''\n\t\t\ttry:\n\t\t\t\tw, x, y, z = orient['quaternion']\n\t\t\t\treturn (x, y, z, w)\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\t\t\ttry:\n\t\t\t\treturn quaternion_from_euler(pi*orient['roll']/180.,\n\t\t\t\t\t\tpi*orient['pitch']/180., pi*orient['yaw']/180.)\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\t\t\ttry:\n\t\t\t\tm = identity_matrix()\n\t\t\t\tm[:3,:3] = orient['matrix']\n\t\t\t\treturn quaternion_from_matrix(m)\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\n\t\t# get data\n\t\tdata = self.mt.read_measurement()\n\t\t# common header\n\t\th = Header()\n\t\th.stamp = rospy.Time.now()\n\t\th.frame_id = self.frame_id\n\t\t\n\t\t# get data (None if not present)\n\t\ttemp = data.get('Temp')\t# float\n\t\traw_data = data.get('RAW')\n\t\timu_data = data.get('Calib')\n\t\torient_data = data.get('Orient')\n\t\tvelocity_data = data.get('Velocity')\n\t\tposition_data = data.get('Position')\n\t\trawgps_data = data.get('RAWGPS')\n\t\tstatus = data.get('Status')\t# int\n\n\t\t# create messages and default values\n\t\timu_msg = Imu()\n\t\timu_msg.orientation_covariance = (-1., )*9\n\t\timu_msg.angular_velocity_covariance = (-1., )*9\n\t\timu_msg.linear_acceleration_covariance = (-1., )*9\n\t\tpub_imu = False\n\t\tgps_msg = NavSatFix()\n\t\txgps_msg = GPSFix()\n\t\tpub_gps = False\n\t\tvel_msg = TwistStamped()\n\t\tpub_vel = False\n\t\tmag_msg = Vector3Stamped()\n\t\tpub_mag = False\n\t\ttemp_msg = Float32()\n\t\tpub_temp = False\n\t\t\n\t\t# fill information where it's due\n\t\t# start by raw information that can be overridden\n\t\tif raw_data: # TODO warn about data not calibrated\n\t\t\tpub_imu = True\n\t\t\tpub_vel = True\n\t\t\tpub_mag = True\n\t\t\tpub_temp = True\n\t\t\t# acceleration\n\t\t\timu_msg.linear_acceleration.x = raw_data['accX']\n\t\t\timu_msg.linear_acceleration.y = raw_data['accY']\n\t\t\timu_msg.linear_acceleration.z = raw_data['accZ']\n\t\t\timu_msg.linear_acceleration_covariance = (0., )*9\n\t\t\t# gyroscopes\n\t\t\timu_msg.angular_velocity.x = raw_data['gyrX']\n\t\t\timu_msg.angular_velocity.y = raw_data['gyrY']\n\t\t\timu_msg.angular_velocity.z = raw_data['gyrZ']\n\t\t\timu_msg.angular_velocity_covariance = (0., )*9\n\t\t\tvel_msg.twist.angular.x = raw_data['gyrX']\n\t\t\tvel_msg.twist.angular.y = raw_data['gyrY']\n\t\t\tvel_msg.twist.angular.z = raw_data['gyrZ']\n\t\t\t# magnetometer\n\t\t\tmag_msg.vector.x = raw_data['magX']\n\t\t\tmag_msg.vector.y = raw_data['magY']\n\t\t\tmag_msg.vector.z = raw_data['magZ']\n\t\t\t# temperature\n\t\t\t# two's-complement decoding of a 16-bit value, 1/256 resolution\n\t\t\tx = raw_data['temp']\n\t\t\tif x&0x8000:\n\t\t\t\ttemp_msg.data = (x - (1<<16))/256.\n\t\t\telse:\n\t\t\t\ttemp_msg.data = x/256.\n\t\tif rawgps_data:\n\t\t\tif rawgps_data['bGPS']/')\r\ndef addtoCart(id, qty):\r\n updateCart(id, qty)\r\n return redirect(request.referrer)\r\n\r\n@bp.route('/remove/<id>')\r\ndef remove(id):\r\n cart = session.get('cart')\r\n if not cart: return abort(404)\r\n for i in cart:\r\n if str(i['id']) == str(id):\r\n cart.remove(i)\r\n session['cart'] = cart\r\n flash('Item removed from cart')\r\n return redirect(request.referrer)\r\n\r\n@bp.route('/checkout')\r\ndef checkout():\r\n scheduled = session.get('scheduled')\r\n items = getCartItems()\r\n total = getTotal()\r\n if not items: abort(404)\r\n return render_template('main/checkout.html', title='Checkout', items=items, total=total, scheduled=scheduled)\r\n\r\n@bp.route('/scheduled', methods=['GET', 'POST'])\r\ndef scheduled():\r\n min = 
datetime.today().strftime('%Y-%m-%d')\r\n max = (datetime.today() + timedelta(days=30)).strftime('%Y-%m-%d')\r\n\r\n if request.method == 'POST':\r\n date = request.form.get('date') # e.g. \"2022-07-23\"\r\n time = request.form.get('time') # e.g. \"12:00\"\r\n session['scheduled'] = {'date': date, 'time': time}\r\n return redirect(url_for('main.checkout'))\r\n return render_template('main/scheduled.html', title='Scheduled Order', min=min, max=max)\r\n\r\n@bp.route('/payment', methods=['GET', 'POST'])\r\ndef payment():\r\n items = session.get('cart')\r\n if not items: abort(404)\r\n # make order, pop session scheduled and cart\r\n if request.method == 'POST':\r\n payment = request.form.get('payment')\r\n name = str(request.form.get('name')).title()\r\n email = request.form.get('email')\r\n phone = request.form.get('phone')\r\n note = request.form.get('note')\r\n next = int(query_db('select seq from sqlite_sequence where name=\"orders\"', one=True)['seq']) + 1\r\n hash = generate_password_hash(current_app.config['SECRET_KEY'] + str(next))\r\n due = datetime.now()\r\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n if 'scheduled' in session:\r\n scheduled = session.get('scheduled')\r\n due = scheduled['date'] + ' ' + scheduled['time']\r\n due = datetime.strptime(due, \"%Y-%m-%d %H:%M\").strftime('%Y-%m-%d %H:%M:%S')\r\n\r\n query_db('insert into orders values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\r\n (None, name, email, phone, due, timestamp, note, payment, 0, hash, 1))\r\n for item in items:\r\n query_db('insert into order_items values (?, ?, ?)',\r\n (str(next), item['id'], item['qty']))\r\n get_db().commit()\r\n # send email\r\n session.pop('cart', None)\r\n session.pop('scheduled', None)\r\n flash('An order confirmation will be sent to your email.')\r\n return redirect(url_for('main.index'))\r\n return render_template('main/payment.html', title='Payment')\r\n\r\n@bp.route('/lookup', methods=['GET', 'POST'])\r\ndef lookup():\r\n if request.method == 'POST':\r\n hash = request.form.get('hash')\r\n order = query_db('select * from orders where hash = ?', (hash,), one=True)\r\n if order:\r\n return redirect(url_for('main.order', hash=hash))\r\n else:\r\n abort(404)\r\n return render_template('main/lookup.html', title='Order Lookup')\r\n\r\n@bp.route('/order/<hash>')\r\ndef order(hash):\r\n status = {\"0\": \"Cancelled\", \"1\": \"Created\", \"2\": \"Preparing\", \"3\": \"Ready\"}\r\n progress = {\"0\": \"0\", \"1\": \"33\", \"2\": \"66\", \"3\": \"100\"}\r\n order = query_db('select * from orders where hash = ?', (hash,), one=True)\r\n if not order: abort(401)\r\n\r\n items = query_db('''select * from order_items oi join menu m\r\n on oi.item_id = m.id where oi.order_id = ?''',\r\n (order['id'],))\r\n order = to_object(order)\r\n order['progress'] = progress[str(order['status'])]\r\n order['status'] = status[str(order['status'])]\r\n return render_template('main/order.html', title=f'Order #{order[\"id\"]:02d}', order=order, items=items)\r\n\r\n@bp.route('/contact')\r\ndef contact():\r\n return render_template('main/contact.html', title='Contact')\r\n", "repo_name": "kaungsetlin/restaurant", "sub_path": "app/main/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 5303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "db.query_db", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": 
"call"}, {"api_name": "app.main.bp.route", "line_number": 8, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 8, "usage_type": "name"}, {"api_name": "db.query_db", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 16, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 16, "usage_type": "name"}, {"api_name": "app.main.utils.getCartItems", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 23, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 23, "usage_type": "name"}, {"api_name": "app.main.utils.updateCart", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.referrer", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "app.main.bp.route", "line_number": 28, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.request.referrer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "app.main.bp.route", "line_number": 33, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 46, "usage_type": "name"}, {"api_name": "app.main.utils.getCartItems", "line_number": 47, "usage_type": "call"}, {"api_name": "app.main.utils.getTotal", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 44, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", 
"line_number": 59, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 52, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "db.query_db", "line_number": 76, "usage_type": "call"}, {"api_name": "werkzeug.security.generate_password_hash", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 77, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "db.query_db", "line_number": 85, "usage_type": "call"}, {"api_name": "db.query_db", "line_number": 88, "usage_type": "call"}, {"api_name": "db.get_db", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 
92, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 64, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "db.query_db", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 107, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 98, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 98, "usage_type": "name"}, {"api_name": "db.query_db", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 114, "usage_type": "call"}, {"api_name": "db.query_db", "line_number": 116, "usage_type": "call"}, {"api_name": "db.to_object", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 122, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 109, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 126, "usage_type": "call"}, {"api_name": "app.main.bp.route", "line_number": 124, "usage_type": "call"}, {"api_name": "app.main.bp", "line_number": 124, "usage_type": "name"}]} +{"seq_id": "73776685758", "text": "from multiprocessing.sharedctypes import Value\nimport os\nimport sys\nfrom csv import reader\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nimport neurokit2 as nk\nimport hrvanalysis as hrvana\n\n\ndef is_cognitively_fatigued(block):\n # If the subject is in the first 3 blocks, we assume they are not fatigued\n block_num = int(block.split(\"_\")[0][-1])\n return block_num, 0 if block_num <= 3 else 1\n\n\ndef get_n_back_score(block_dir):\n # return the final score (float from 0.0-1.0) of the N-back task that the user played (0-100)\n n_back_file = None\n for content in os.listdir(block_dir):\n if \".csv\" in content:\n n_back_file = content\n break\n if not n_back_file:\n return 0\n\n with open(os.path.join(block_dir, n_back_file), \"r\") as file:\n csv_reader = reader(file)\n last_row = list(csv_reader)[-1]\n\n # Example row: ['3_Letter_C.png', '', '11', '0', '67', '1', '97.46835443037975', '0.0']\n # Second last column of the last row represents the final score in the game\n return round(float(last_row[-2]) / 100, 2)\n\n\ndef extract_EMG_features():\n pass\n\n\ndef extract_EDA_features():\n pass\n\n\ndef 
extract_ECG_features(ecg_df):\n peaks, info = nk.ecg_peaks(ecg_df.values, sampling_rate=1000)\n hrv_features = nk.hrv(peaks, sampling_rate=1000, show=False)\n return hrv_features\n\n\ndef extract_features(block_dir):\n bsp_dir = os.path.join(block_dir, \"bsp\")\n bsp_file_name = os.listdir(bsp_dir)[0]\n bsp_file_path = os.path.join(bsp_dir, bsp_file_name)\n # bsp_data contains: [ECG, GSR, Breathing, EMG, PulOxR, PulOxIR] columns\n bsp_data = pd.read_csv(bsp_file_path)\n ecg_features = extract_ECG_features(bsp_data[\"ECG\"])\n return ecg_features\n\n\nif __name__ == \"__main__\":\n cog_data_dir = \"/home/ashish/Documents/github/VA/data/cognitive_data\"\n phy_data_dir = \"/home/ashish/Documents/github/VA/data/physical_data\"\n session_counter = 0\n data_features = pd.DataFrame([])\n # For logging error outputs\n old_stdout = sys.stdout\n log_file = open(\"data_log.log\", \"w\")\n sys.stdout = log_file\n\n for user_id in range(1, 10):\n user_dir = os.path.join(cog_data_dir, f\"user_{user_id}\")\n for session in os.listdir(user_dir):\n session_dir = os.path.join(user_dir, session)\n for block in os.listdir(session_dir):\n # Sanity check if the directory has the name \"block\" or not\n if \"block\" not in block or \"practice\" in block.lower():\n # Ignore directories other than block\n continue\n\n try:\n block_dir = os.path.join(session_dir, block)\n # For each block, we want to extract three different sets of data\n score = get_n_back_score(block_dir)\n print(\n f\"{session_counter+1}. Score: {score} | Session: {session[-1]} | User_ID: {user_id}\"\n )\n session_counter += 1\n bsp_features = extract_features(block_dir)\n block_num, cog_fatigue = is_cognitively_fatigued(block)\n bsp_features.insert(loc=0, column=\"user_id\", value=user_id)\n bsp_features.insert(loc=1, column=\"block_num\", value=block_num)\n bsp_features[\"n_back_score\"] = score\n bsp_features[\"cog_fatigue\"] = cog_fatigue\n data_features = data_features.append(\n bsp_features, ignore_index=True\n )\n\n except Exception as error:\n print(\"--------------------\")\n print(\n f\"ERROR: {session_counter+1}. 
Score: {score} | Session: {session[-1]} | User_ID: {user_id}\"\n )\n print(error)\n print(\"--------------------\")\n\n data_features.to_csv(\"ecg_hrv_features.csv\")\n sys.stdout = old_stdout\n log_file.close()\n", "repo_name": "asheeshcric/VA", "sub_path": "code/scripts/extract_features.py", "file_name": "extract_features.py", "file_ext": "py", "file_size_in_byte": 4037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 32, "usage_type": "call"}, {"api_name": "neurokit2.ecg_peaks", "line_number": 49, "usage_type": "call"}, {"api_name": "neurokit2.hrv", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 111, "usage_type": "attribute"}]} +{"seq_id": "23951170872", "text": "import os\nimport argparse\nfrom os.path import join, exists\nimport subprocess as sp\nimport json\nimport tempfile\nimport multiprocessing as mp\nfrom time import time\nfrom datetime import timedelta\nimport queue\nimport logging\nfrom itertools import combinations\n\nfrom cytoolz import curry\nfrom pyrouge.utils import log\nfrom pyrouge import Rouge155\n\nfrom transformers import BertTokenizer, RobertaTokenizer\n# from transformers import BertTokenizer\n\nMAX_LEN = 512\n\n_ROUGE_PATH = '/root/moming/code/SciSoft/ROUGE-1.5.5'\ntemp_path = '/root/moming/code/MatchSum_efl/preprocess/temp' # path to store some temporary files\n\noriginal_data, sent_ids = [], []\n\ndef load_jsonl(data_path):\n data = []\n with open(data_path) as f:\n for line in f:\n data.append(json.loads(line))\n return data\n\ndef get_rouge(path, dec):\n log.get_global_console_logger().setLevel(logging.WARNING)\n dec_pattern = '(\\d+).dec'\n ref_pattern = '#ID#.ref'\n dec_dir = join(path, 'decode')\n ref_dir = join(path, 'reference')\n\n with open(join(dec_dir, '0.dec'), 'w') as f:\n for sentence in dec:\n print(sentence, file=f)\n\n cmd = '-c 95 -r 1000 -n 2 -m'\n with tempfile.TemporaryDirectory() as tmp_dir:\n Rouge155.convert_summaries_to_rouge_format(\n dec_dir, join(tmp_dir, 'dec'))\n 
Rouge155.convert_summaries_to_rouge_format(\n ref_dir, join(tmp_dir, 'ref'))\n Rouge155.write_config_static(\n join(tmp_dir, 'dec'), dec_pattern,\n join(tmp_dir, 'ref'), ref_pattern,\n join(tmp_dir, 'settings.xml'), system_id=1\n )\n cmd = (join(_ROUGE_PATH, 'ROUGE-1.5.5.pl')\n + ' -e {} '.format(join(_ROUGE_PATH, 'data'))\n + cmd\n + ' -a {}'.format(join(tmp_dir, 'settings.xml')))\n output = sp.check_output(cmd.split(' '), universal_newlines=True)\n\n line = output.split('\\n')\n rouge1 = float(line[3].split(' ')[3])\n rouge2 = float(line[7].split(' ')[3])\n rougel = float(line[11].split(' ')[3])\n return (rouge1 + rouge2 + rougel) / 3\n\n@curry\ndef get_candidates(tokenizer, cls, sep_id, idx):\n\n idx_path = join(temp_path, str(idx))\n\n # create some temporary files to calculate ROUGE\n #if not os.path.exists(idx_path):\n sp.call('mkdir ' + idx_path, shell=True)\n #if not os.path.exists(join(idx_path, 'decode')):\n sp.call('mkdir ' + join(idx_path, 'decode'), shell=True)\n #if not os.path.exists(join(idx_path, 'reference')):\n sp.call('mkdir ' + join(idx_path, 'reference'), shell=True)\n\n # load data\n data = {}\n data['text'] = original_data[idx]['text']\n data['summary'] = original_data[idx]['summary']\n\n # write reference summary to temporary files\n ref_dir = join(idx_path, 'reference')\n with open(join(ref_dir, '0.ref'), 'w') as f:\n for sentence in data['summary']:\n print(sentence, file=f)\n\n # get candidate summaries\n # here is for CNN/DM: truncate each document into the 5 most important sentences (using BertExt),\n # then select any 2 or 3 sentences to form a candidate summary, so there are C(5,2)+C(5,3)=20 candidate summaries.\n # if you want to process other datasets, you may need to adjust these numbers according to specific situation.\n sent_id = original_data[idx]['sent_id'][:5]\n indices = list(combinations(sent_id, 2))\n indices += list(combinations(sent_id, 3))\n #indices = list(combinations(sent_id, 3))\n if len(sent_id) <1:\n indices += list(combinations(sent_id, len(sent_id)))\n\n # get ROUGE score for each candidate summary and sort them in descending order\n score = []\n for i in indices:\n i = list(i)\n i.sort()\n # write dec\n dec = []\n for j in i:\n sent = data['text'][j]\n dec.append(sent)\n score.append((i, get_rouge(idx_path, dec)))\n score.sort(key=lambda x : x[1], reverse=True)\n\n # write candidate indices and score\n data['ext_idx'] = sent_id\n data['indices'] = []\n data['score'] = []\n for i, R in score:\n data['indices'].append(list(map(int, i)))\n data['score'].append(R)\n\n # tokenize and get candidate_id\n candidate_summary = []\n for i in data['indices']:\n cur_summary = [cls]\n for j in i:\n cur_summary += data['text'][j].split()\n cur_summary = cur_summary[:MAX_LEN]\n cur_summary = ' '.join(cur_summary)\n candidate_summary.append(cur_summary)\n\n data['candidate_id'] = []\n for summary in candidate_summary:\n token_ids = tokenizer.encode(summary, add_special_tokens=False)[:(MAX_LEN - 1)]\n token_ids += sep_id\n data['candidate_id'].append(token_ids)\n\n # tokenize and get text_id\n text = [cls]\n for sent in data['text']:\n text += sent.split()\n text = text[:MAX_LEN]\n text = ' '.join(text)\n token_ids = tokenizer.encode(text, add_special_tokens=False)[:(MAX_LEN - 1)]\n token_ids += sep_id\n data['text_id'] = token_ids\n\n # tokenize and get summary_id\n summary = [cls]\n for sent in data['summary']:\n summary += sent.split()\n summary = summary[:MAX_LEN]\n summary = ' '.join(summary)\n token_ids = tokenizer.encode(summary, 
add_special_tokens=False)[:(MAX_LEN - 1)]\n token_ids += sep_id\n data['summary_id'] = token_ids\n\n # write processed data to temporary file\n processed_path = join(temp_path, 'processed')\n with open(join(processed_path, '{}.json'.format(idx)), 'w') as f:\n json.dump(data, f, indent=4)\n\n sp.call('rm -r ' + idx_path, shell=True)\n\ndef get_candidates_mp(args):\n \n # choose tokenizer\n if args.tokenizer == 'bert':\n tokenizer = BertTokenizer.from_pretrained('/root/moming/pretrained_language_models/bert_eng')\n cls, sep = '[CLS]', '[SEP]'\n else:\n tokenizer = RobertaTokenizer.from_pretrained('/root/moming/pretrained_language_models/roberta')\n cls, sep = '', ''\n sep_id = tokenizer.encode(sep, add_special_tokens=False)\n\n # load original data and indices\n global original_data, sent_ids\n original_data = load_jsonl(args.data_path)\n # sent_ids = load_jsonl(args.index_path)\n n_files = len(original_data)\n # assert len(sent_ids) == len(original_data)\n print('total {} documents'.format(n_files))\n if not os.path.exists(temp_path):\n os.makedirs(temp_path)\n processed_path = join(temp_path, 'processed')\n if not os.path.exists(processed_path):\n os.makedirs(processed_path)\n\n # use multi-processing to get candidate summaries\n start = time()\n print('start getting candidates with multi-processing !!!')\n \n with mp.Pool(40) as pool:\n list(pool.imap_unordered(get_candidates(tokenizer, cls, sep_id), range(n_files), chunksize=64))\n \n print('finished in {}'.format(timedelta(seconds=time()-start)))\n \n # write processed data\n print('start writing {} files'.format(n_files))\n for i in range(n_files):\n with open(join(processed_path, '{}.json'.format(i))) as f:\n data = json.loads(f.read())\n with open(args.write_path, 'a') as f:\n print(json.dumps(data), file=f)\n \n os.system('rm -r {}'.format(temp_path))\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(\n description='Process truncated documents to obtain candidate summaries'\n )\n # parser.add_argument('--tokenizer', type=str, default='bert',#required=True,\n parser.add_argument('--tokenizer', type=str, default='roberta',#required=True,\n help='BERT/RoBERTa')\n parser.add_argument('--data_path', type=str, default='/root/moming/result/presumm/test_match.jsonl',# required=True,\n help='path to the original dataset, the original dataset should contain text and summary')\n parser.add_argument('--index_path', type=str, default='/root/moming/data/disco/index',#required=True,\n help='indices of the remaining sentences of the truncated document')\n parser.add_argument('--write_path', type=str, default='/root/moming/data/disco/bigbird_test_CNNDM_bert.jsonl', # required=True,\n help='path to store the processed dataset')\n\n args = parser.parse_args()\n assert args.tokenizer in ['bert', 'roberta']\n assert exists(args.data_path)\n assert exists(args.index_path)\n\n get_candidates_mp(args)\n", "repo_name": "alibaba/EasyNLP", "sub_path": "examples/parasum/paraphrase_pretraining/preprocess/get_candidate.py", "file_name": "get_candidate.py", "file_ext": "py", "file_size_in_byte": 8334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1835, "dataset": "github-code", "pt": "97", "api": [{"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "pyrouge.utils.log.get_global_console_logger", "line_number": 36, "usage_type": "call"}, {"api_name": "pyrouge.utils.log", "line_number": 36, "usage_type": "name"}, {"api_name": "logging.WARNING", "line_number": 36, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 47, "usage_type": "call"}, {"api_name": "pyrouge.Rouge155.convert_summaries_to_rouge_format", "line_number": 48, "usage_type": "call"}, {"api_name": "pyrouge.Rouge155", "line_number": 48, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "pyrouge.Rouge155.convert_summaries_to_rouge_format", "line_number": 50, "usage_type": "call"}, {"api_name": "pyrouge.Rouge155", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "pyrouge.Rouge155.write_config_static", "line_number": 52, "usage_type": "call"}, {"api_name": "pyrouge.Rouge155", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 76, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 98, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 99, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 164, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 166, "usage_type": "call"}, {"api_name": "cytoolz.curry", "line_number": 69, "usage_type": "name"}, {"api_name": "transformers.BertTokenizer.from_pretrained", "line_number": 172, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer", "line_number": 172, "usage_type": "name"}, {"api_name": "transformers.RobertaTokenizer.from_pretrained", "line_number": 175, "usage_type": "call"}, {"api_name": "transformers.RobertaTokenizer", "line_number": 175, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 190, "usage_type": 
"call"}, {"api_name": "time.time", "line_number": 193, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 196, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 199, "usage_type": "call"}, {"api_name": "time.time", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 204, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 205, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 207, "usage_type": "call"}, {"api_name": "os.system", "line_number": 209, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "15787160669", "text": "from flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\n\n\napp = Flask(__name__)\n\n\n\n# Create connection and Pass connection to the pymongo instance.\n#client = PyMongo.MongoClient(conn)\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/MarsDB\"\nmongo = PyMongo(app)\n\ncollection = mongo.db.mars \n\n#index route to render mongo database\n@app.route(\"/\")\ndef home():\n\n # Find data from the mongo database\n mars = collection.find_one()\n\n # Return template and data\n return render_template(\"index.html\",mars=mars )\n\n\n#route to render scrape template\n@app.route(\"/scrape\")\ndef scraper():\n mars_data = scrape_mars.scrape_all()\n \n collection.update({},mars_data,upsert=True)\n\n return redirect (\"/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "repo_name": "CORNELIAHINTON/web-scraping-challenge", "sub_path": "Mission_to_Mars/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 814, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_pymongo.PyMongo", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "scrape_mars.scrape_all", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "11732087970", "text": "import formish\nimport schemaish\nimport transaction\nfrom validatish import validator\n\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.httpexceptions import HTTPOk\n\nfrom zope.component.event import objectEventNotify\nfrom zope.component import queryUtility\nfrom zope.component import getMultiAdapter\nfrom zope.component import getAdapter\n\nfrom pyramid.exceptions import NotFound\nfrom pyramid.security import authenticated_userid\nfrom pyramid.security import has_permission\nfrom pyramid.url import resource_url\nfrom repoze.workflow import get_workflow\nfrom pyramid.traversal import resource_path\n\nfrom repoze.lemonade.content import create_content\n\nfrom karl.events import ObjectModifiedEvent\nfrom karl.events import ObjectWillBeModifiedEvent\n\nfrom karl.models.interfaces import ICommunity\nfrom karl.models.interfaces import IIntranets\nfrom karl.models.interfaces import ITagQuery\nfrom karl.models.interfaces import ICatalogSearch\n\nfrom karl.utilities import lock\nfrom karl.utilities.alerts import Alerts\nfrom karl.utilities.image import relocate_temp_images\nfrom karl.utilities.interfaces import IAlerts\nfrom karl.utils 
import find_interface\nfrom karl.utils import find_profiles\nfrom karl.utils import find_repo\n\nfrom karl.views.api import TemplateAPI\n\nfrom karl.views.utils import convert_to_script\nfrom karl.views.tags import get_tags_client_data\nfrom karl.views.utils import make_name\nfrom karl.views.tags import set_tags\nfrom karl.views.forms import widgets as karlwidgets\nfrom karl.views.forms import validators as karlvalidators\nfrom karl.views.versions import format_local_date\n\nfrom karl.content.interfaces import IWiki\nfrom karl.content.interfaces import IWikiPage\nfrom karl.content.models.wiki import WikiPage\nfrom karl.content.views.utils import extract_description\nfrom karl.content.views.utils import sendalert_default\n\nfrom karl.security.workflow import get_security_states\n\n_wiki_text_help = \"\"\"You can create a new page by naming it and surrounding\nthe name with ((double parentheses)). When you save the page, the contents\nof the parentheses will have a small + link next to it, which you can click\nto create a new page with that name.\"\"\"\n\ntags_field = schemaish.Sequence(schemaish.String())\ntext_field = schemaish.String(\n title='Body text',\n description=_wiki_text_help,\n )\nsendalert_field = schemaish.Boolean(\n title='Send email alert to community members?')\nsecurity_field = schemaish.String(\n description=('Items marked as private can only be seen by '\n 'members of this community.'))\n\ndef redirect_to_front_page(context, request):\n\n front_page = context['front_page']\n location = resource_url(front_page, request)\n return HTTPFound(location=location)\n\n\ndef redirect_to_add_form(context, request):\n return HTTPFound(\n location=resource_url(context, request, 'add_wikipage.html'))\n\n\nclass AddWikiPageFormController(object):\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.workflow = get_workflow(IWikiPage, 'security', context)\n\n def _get_security_states(self):\n return get_security_states(self.workflow, None, self.request)\n\n def form_defaults(self):\n defaults = {\n 'title':self.request.params.get('title', ''),\n 'tags':[],\n 'text':'',\n 'sendalert': sendalert_default(self.context,\n self.request)\n }\n if self.workflow is not None:\n defaults['security_state'] = self.workflow.initial_state\n return defaults\n\n def form_fields(self):\n fields = []\n title_field = schemaish.String(\n validator=validator.All(\n validator.Length(max=100),\n validator.Required(),\n karlvalidators.FolderNameAvailable(self.context),\n )\n )\n fields.append(('title', title_field))\n fields.append(('tags', tags_field))\n fields.append(('text', text_field))\n fields.append(('sendalert', sendalert_field))\n security_states = self._get_security_states()\n if security_states:\n fields.append(('security_state', security_field))\n return fields\n\n def form_widgets(self, fields):\n widgets = {\n 'title':formish.Hidden(empty=''),\n 'tags':karlwidgets.TagsAddWidget(),\n 'text':karlwidgets.RichTextWidget(empty=''),\n 'sendalert':karlwidgets.SendAlertCheckbox(),\n }\n security_states = self._get_security_states()\n schema = dict(fields)\n if 'security_state' in schema:\n security_states = self._get_security_states()\n widgets['security_state'] = formish.RadioChoice(\n options=[ (s['name'], s['title']) for s in security_states],\n none_option=None)\n return widgets\n\n def __call__(self):\n api = TemplateAPI(self.context, self.request,\n 'Add Wiki Page')\n api.karl_client_data['text'] = dict(\n enable_wiki_plugin = True,\n 
enable_imagedrawer_upload = True,\n )\n return {'api':api, 'actions':()}\n\n def handle_cancel(self):\n return HTTPFound(location=resource_url(self.context, self.request))\n\n def handle_submit(self, converted):\n request = self.request\n context = self.context\n workflow = self.workflow\n wikipage = create_content(\n IWikiPage,\n converted['title'],\n converted['text'],\n extract_description(converted['text']),\n authenticated_userid(request),\n )\n\n name = make_name(context, converted['title'])\n context[name] = wikipage\n\n if workflow is not None:\n workflow.initialize(wikipage)\n if 'security_state' in converted:\n workflow.transition_to_state(wikipage,\n request,\n converted['security_state'])\n\n # Save the tags on it.\n set_tags(wikipage, request, converted['tags'])\n\n relocate_temp_images(wikipage, request)\n\n if converted['sendalert']:\n alerts = queryUtility(IAlerts, default=Alerts())\n alerts.emit(wikipage, request)\n\n msg = '?status_message=Wiki%20Page%20created'\n location = resource_url(wikipage, request) + msg\n return HTTPFound(location=location)\n\n\ndef get_wikitoc_data(context, request):\n wikiparent = context.__parent__\n search = getAdapter(context, ICatalogSearch)\n count, docids, resolver = search(\n path = resource_path(wikiparent),\n interfaces = [IWikiPage,]\n )\n items = []\n profiles = find_profiles(context)\n for docid in docids:\n entry = resolver(docid)\n tags = getMultiAdapter((entry, request), ITagQuery).tagswithcounts\n author = entry.creator\n profile = profiles.get(author, None)\n profile_url = resource_url(profile, request) if profile is not None else None\n if profile is not None:\n author_name = '%s %s' % (profile.firstname, profile.lastname)\n else:\n author_name = author\n items.append(dict(\n id = \"id_\" + entry.__name__,\n name = entry.__name__,\n title = entry.title,\n author = author,\n author_name = author_name,\n profile_url = profile_url,\n tags = [tag['tag'] for tag in tags],\n created = entry.created.isoformat(),\n modified = entry.modified.isoformat(),\n ))\n result = dict(\n items = items,\n )\n return result\n\n\ndef show_wikipage_view(context, request):\n is_front_page = (context.__name__ == 'front_page')\n if is_front_page:\n community = find_interface(context, ICommunity)\n page_title = '%s Community Wiki Page' % community.title\n backto = False\n else:\n page_title = context.title\n backto = {\n 'href': resource_url(context.__parent__, request),\n 'title': context.__parent__.title,\n }\n\n actions = []\n if has_permission('edit', context, request):\n actions.append(('Edit', resource_url(context, request, 'edit.html')))\n if has_permission('delete', context, request) and not is_front_page:\n actions.append(('Delete', resource_url(context, request, 'delete.html')))\n repo = find_repo(context)\n show_trash = False\n if not find_interface(context, IIntranets):\n if repo is not None and has_permission('edit', context, request):\n actions.append(('History', resource_url(context, request, 'history.html')))\n show_trash = True\n if has_permission('administer', context, request):\n actions.append(('Advanced', resource_url(context, request, 'advanced.html')))\n\n api = TemplateAPI(context, request, page_title)\n\n client_json_data = dict(\n tagbox = get_tags_client_data(context, request),\n )\n\n wiki = find_interface(context, IWiki)\n feed_url = resource_url(wiki, request, \"atom.xml\")\n return dict(\n api=api,\n actions=actions,\n head_data=convert_to_script(client_json_data),\n feed_url=feed_url,\n backto=backto,\n is_front_page=is_front_page,\n 
show_trash=show_trash,\n lock_info=lock.lock_info_for_view(context, request),\n )\n\n\ndef preview_wikipage_view(context, request, WikiPage=WikiPage, tz=None):\n version_num = int(request.params['version_num'])\n repo = find_repo(context)\n for version in repo.history(context.docid):\n if version.version_num == version_num:\n break\n else:\n raise NotFound(\"No such version: %d\" % version_num)\n\n page = WikiPage()\n page.__parent__ = context.__parent__\n page.revert(version)\n\n is_front_page = (context.__name__ == 'front_page')\n if is_front_page:\n community = find_interface(context, ICommunity)\n page_title = '%s Community Wiki Page' % community.title\n else:\n page_title = page.title\n\n profiles = find_profiles(context)\n author = profiles[version.user]\n\n # Extra paranoia, probably not strictly necessary. I just want to make\n # extra special sure that the temp WikiPage object we create above\n # doesn't accidentally get attached to the persistent object graph.\n transaction.doom()\n\n return {\n 'date': format_local_date(version.archive_time, tz),\n 'author': author.title,\n 'title': page_title,\n 'body': page.cook(request),\n }\n\n\ndef show_wikitoc_view(context, request):\n\n is_front_page = (context.__name__ == 'front_page')\n if is_front_page:\n community = find_interface(context, ICommunity)\n page_title = '%s Community Wiki Page' % community.title\n backto = False\n else:\n page_title = context.title\n backto = {\n 'href': resource_url(context.__parent__, request),\n 'title': context.__parent__.title,\n }\n\n actions = []\n\n api = TemplateAPI(context, request, page_title)\n\n wikitoc_data = get_wikitoc_data(context, request)\n\n page_data = dict(\n wikitoc = wikitoc_data,\n )\n\n client_json_data = convert_to_script(page_data)\n\n wiki = find_interface(context, IWiki)\n feed_url = resource_url(wiki, request, \"atom.xml\")\n repo = find_repo(context)\n show_trash = repo is not None and has_permission('edit', context, request)\n\n return dict(api=api,\n actions=actions,\n head_data=client_json_data,\n feed_url=feed_url,\n backto=backto,\n lock_info=lock.lock_info_for_view(context, request),\n show_trash=show_trash,\n )\n\n\nclass EditWikiPageFormController(object):\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.workflow = get_workflow(IWikiPage, 'security', context)\n self.userid = authenticated_userid(self.request)\n\n def _get_security_states(self):\n return get_security_states(self.workflow, None, self.request)\n\n def form_defaults(self):\n defaults = {\n 'title':self.context.title,\n 'tags':[],\n 'text':self.context.text,\n }\n if self.workflow is not None:\n defaults['security_state'] = self.workflow.state_of(self.context)\n return defaults\n\n def form_fields(self):\n fields = []\n title_field = schemaish.String(\n validator=validator.All(\n validator.Length(max=100),\n validator.Required(),\n karlvalidators.FolderNameAvailable(\n self.context.__parent__,\n exceptions=(self.context.title,)),\n karlvalidators.WikiTitleAvailable(\n self.context.__parent__,\n exceptions=(self.context.title,)),\n )\n )\n fields.append(('title', title_field))\n fields.append(('tags', tags_field))\n fields.append(('text', text_field))\n security_states = self._get_security_states()\n if security_states:\n fields.append(('security_state', security_field))\n return fields\n\n def form_widgets(self, fields):\n tagdata = get_tags_client_data(self.context, self.request)\n widgets = {\n 'title':formish.Input(empty=''),\n 
'tags':karlwidgets.TagsEditWidget(tagdata=tagdata),\n 'text':karlwidgets.RichTextWidget(empty=''),\n }\n security_states = self._get_security_states()\n schema = dict(fields)\n if 'security_state' in schema:\n security_states = self._get_security_states()\n widgets['security_state'] = formish.RadioChoice(\n options=[ (s['name'], s['title']) for s in security_states],\n none_option=None)\n return widgets\n\n def __call__(self):\n if not lock.is_locked(self.context):\n lock.lock(self.context, self.userid)\n\n page_title = 'Edit %s' % self.context.title\n api = TemplateAPI(self.context, self.request, page_title)\n # prepare client data\n api.karl_client_data['text'] = dict(\n enable_wiki_plugin = True,\n enable_imagedrawer_upload = True,\n )\n return {'api':api,\n 'actions':(),\n 'lock_info':lock.lock_info_for_view(self.context, self.request),\n }\n\n def handle_cancel(self):\n if lock.owns_lock(self.context, self.userid):\n lock.clear(self.context)\n return HTTPFound(location=resource_url(self.context, self.request))\n\n def handle_submit(self, converted):\n if lock.owns_lock(self.context, self.userid):\n lock.clear(self.context)\n\n context = self.context\n request = self.request\n workflow = self.workflow\n # *will be* modified event\n objectEventNotify(ObjectWillBeModifiedEvent(context))\n if workflow is not None:\n if 'security_state' in converted:\n workflow.transition_to_state(context, request,\n converted['security_state'])\n\n context.text = converted['text']\n context.description = extract_description(converted['text'])\n newtitle = converted['title']\n if newtitle != context.title:\n context.change_title(newtitle)\n\n # Save the tags on it\n set_tags(context, request, converted['tags'])\n\n # Modified\n context.modified_by = authenticated_userid(request)\n objectEventNotify(ObjectModifiedEvent(context))\n\n location = resource_url(context, request)\n msg = \"?status_message=Wiki%20Page%20edited\"\n return HTTPFound(location=location+msg)\n\ndef unlock_wiki_view(context, request, userid=None):\n if request.method.lower() == 'post':\n if userid is None:\n # for unit tests to override\n userid = authenticated_userid(request)\n if lock.owns_lock(context, userid):\n lock.clear(context)\n return HTTPOk(body='')\n return HTTPFound(location=resource_url(context, request))\n", "repo_name": "karlproject/karl", "sub_path": "karl/content/views/wiki.py", "file_name": "wiki.py", "file_ext": "py", "file_size_in_byte": 16273, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 48, "dataset": "github-code", "pt": "97", "api": [{"api_name": "schemaish.Sequence", "line_number": 62, "usage_type": "call"}, {"api_name": "schemaish.String", "line_number": 62, "usage_type": "call"}, {"api_name": "schemaish.String", "line_number": 63, "usage_type": "call"}, {"api_name": "schemaish.Boolean", "line_number": 67, "usage_type": "call"}, {"api_name": "schemaish.String", "line_number": 69, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 76, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 77, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 81, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 82, "usage_type": "call"}, {"api_name": "repoze.workflow.get_workflow", "line_number": 89, "usage_type": "call"}, {"api_name": "karl.content.interfaces.IWikiPage", "line_number": 89, "usage_type": "argument"}, {"api_name": "karl.security.workflow.get_security_states", 
"line_number": 92, "usage_type": "call"}, {"api_name": "karl.content.views.utils.sendalert_default", "line_number": 99, "usage_type": "call"}, {"api_name": "schemaish.String", "line_number": 108, "usage_type": "call"}, {"api_name": "validatish.validator.All", "line_number": 109, "usage_type": "call"}, {"api_name": "validatish.validator", "line_number": 109, "usage_type": "name"}, {"api_name": "validatish.validator.Length", "line_number": 110, "usage_type": "call"}, {"api_name": "validatish.validator", "line_number": 110, "usage_type": "name"}, {"api_name": "validatish.validator.Required", "line_number": 111, "usage_type": "call"}, {"api_name": "validatish.validator", "line_number": 111, "usage_type": "name"}, {"api_name": "karl.views.forms.validators.FolderNameAvailable", "line_number": 112, "usage_type": "call"}, {"api_name": "karl.views.forms.validators", "line_number": 112, "usage_type": "name"}, {"api_name": "formish.Hidden", "line_number": 126, "usage_type": "call"}, {"api_name": "karl.views.forms.widgets.TagsAddWidget", "line_number": 127, "usage_type": "call"}, {"api_name": "karl.views.forms.widgets", "line_number": 127, "usage_type": "name"}, {"api_name": "karl.views.forms.widgets.RichTextWidget", "line_number": 128, "usage_type": "call"}, {"api_name": "karl.views.forms.widgets", "line_number": 128, "usage_type": "name"}, {"api_name": "karl.views.forms.widgets.SendAlertCheckbox", "line_number": 129, "usage_type": "call"}, {"api_name": "karl.views.forms.widgets", "line_number": 129, "usage_type": "name"}, {"api_name": "formish.RadioChoice", "line_number": 135, "usage_type": "call"}, {"api_name": "karl.views.api.TemplateAPI", "line_number": 141, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 150, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 150, "usage_type": "call"}, {"api_name": "repoze.lemonade.content.create_content", "line_number": 156, "usage_type": "call"}, {"api_name": "karl.content.interfaces.IWikiPage", "line_number": 157, "usage_type": "argument"}, {"api_name": "karl.content.views.utils.extract_description", "line_number": 160, "usage_type": "call"}, {"api_name": "pyramid.security.authenticated_userid", "line_number": 161, "usage_type": "call"}, {"api_name": "karl.views.utils.make_name", "line_number": 164, "usage_type": "call"}, {"api_name": "karl.views.tags.set_tags", "line_number": 175, "usage_type": "call"}, {"api_name": "karl.utilities.image.relocate_temp_images", "line_number": 177, "usage_type": "call"}, {"api_name": "zope.component.queryUtility", "line_number": 180, "usage_type": "call"}, {"api_name": "karl.utilities.interfaces.IAlerts", "line_number": 180, "usage_type": "argument"}, {"api_name": "karl.utilities.alerts.Alerts", "line_number": 180, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 184, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 185, "usage_type": "call"}, {"api_name": "zope.component.getAdapter", "line_number": 190, "usage_type": "call"}, {"api_name": "karl.models.interfaces.ICatalogSearch", "line_number": 190, "usage_type": "argument"}, {"api_name": "pyramid.traversal.resource_path", "line_number": 192, "usage_type": "call"}, {"api_name": "karl.content.interfaces.IWikiPage", "line_number": 193, "usage_type": "name"}, {"api_name": "karl.utils.find_profiles", "line_number": 196, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 199, "usage_type": "call"}, {"api_name": 
"karl.models.interfaces.ITagQuery", "line_number": 199, "usage_type": "argument"}, {"api_name": "pyramid.url.resource_url", "line_number": 202, "usage_type": "call"}, {"api_name": "karl.utils.find_interface", "line_number": 227, "usage_type": "call"}, {"api_name": "karl.models.interfaces.ICommunity", "line_number": 227, "usage_type": "argument"}, {"api_name": "pyramid.url.resource_url", "line_number": 233, "usage_type": "call"}, {"api_name": "pyramid.security.has_permission", "line_number": 238, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 239, "usage_type": "call"}, {"api_name": "pyramid.security.has_permission", "line_number": 240, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 241, "usage_type": "call"}, {"api_name": "karl.utils.find_repo", "line_number": 242, "usage_type": "call"}, {"api_name": "karl.utils.find_interface", "line_number": 244, "usage_type": "call"}, {"api_name": "karl.models.interfaces.IIntranets", "line_number": 244, "usage_type": "argument"}, {"api_name": "pyramid.security.has_permission", "line_number": 245, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 246, "usage_type": "call"}, {"api_name": "pyramid.security.has_permission", "line_number": 248, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 249, "usage_type": "call"}, {"api_name": "karl.views.api.TemplateAPI", "line_number": 251, "usage_type": "call"}, {"api_name": "karl.views.tags.get_tags_client_data", "line_number": 254, "usage_type": "call"}, {"api_name": "karl.utils.find_interface", "line_number": 257, "usage_type": "call"}, {"api_name": "karl.content.interfaces.IWiki", "line_number": 257, "usage_type": "argument"}, {"api_name": "pyramid.url.resource_url", "line_number": 258, "usage_type": "call"}, {"api_name": "karl.views.utils.convert_to_script", "line_number": 262, "usage_type": "call"}, {"api_name": "karl.utilities.lock.lock_info_for_view", "line_number": 267, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 267, "usage_type": "name"}, {"api_name": "karl.content.models.wiki.WikiPage", "line_number": 271, "usage_type": "name"}, {"api_name": "karl.utils.find_repo", "line_number": 273, "usage_type": "call"}, {"api_name": "pyramid.exceptions.NotFound", "line_number": 278, "usage_type": "call"}, {"api_name": "karl.content.models.wiki.WikiPage", "line_number": 280, "usage_type": "call"}, {"api_name": "karl.utils.find_interface", "line_number": 286, "usage_type": "call"}, {"api_name": "karl.models.interfaces.ICommunity", "line_number": 286, "usage_type": "argument"}, {"api_name": "karl.utils.find_profiles", "line_number": 291, "usage_type": "call"}, {"api_name": "transaction.doom", "line_number": 297, "usage_type": "call"}, {"api_name": "karl.views.versions.format_local_date", "line_number": 300, "usage_type": "call"}, {"api_name": "karl.utils.find_interface", "line_number": 311, "usage_type": "call"}, {"api_name": "karl.models.interfaces.ICommunity", "line_number": 311, "usage_type": "argument"}, {"api_name": "pyramid.url.resource_url", "line_number": 317, "usage_type": "call"}, {"api_name": "karl.views.api.TemplateAPI", "line_number": 323, "usage_type": "call"}, {"api_name": "karl.views.utils.convert_to_script", "line_number": 331, "usage_type": "call"}, {"api_name": "karl.utils.find_interface", "line_number": 333, "usage_type": "call"}, {"api_name": "karl.content.interfaces.IWiki", "line_number": 333, "usage_type": "argument"}, {"api_name": 
"pyramid.url.resource_url", "line_number": 334, "usage_type": "call"}, {"api_name": "karl.utils.find_repo", "line_number": 335, "usage_type": "call"}, {"api_name": "pyramid.security.has_permission", "line_number": 336, "usage_type": "call"}, {"api_name": "karl.utilities.lock.lock_info_for_view", "line_number": 343, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 343, "usage_type": "name"}, {"api_name": "repoze.workflow.get_workflow", "line_number": 352, "usage_type": "call"}, {"api_name": "karl.content.interfaces.IWikiPage", "line_number": 352, "usage_type": "argument"}, {"api_name": "pyramid.security.authenticated_userid", "line_number": 353, "usage_type": "call"}, {"api_name": "karl.security.workflow.get_security_states", "line_number": 356, "usage_type": "call"}, {"api_name": "schemaish.String", "line_number": 370, "usage_type": "call"}, {"api_name": "validatish.validator.All", "line_number": 371, "usage_type": "call"}, {"api_name": "validatish.validator", "line_number": 371, "usage_type": "name"}, {"api_name": "validatish.validator.Length", "line_number": 372, "usage_type": "call"}, {"api_name": "validatish.validator", "line_number": 372, "usage_type": "name"}, {"api_name": "validatish.validator.Required", "line_number": 373, "usage_type": "call"}, {"api_name": "validatish.validator", "line_number": 373, "usage_type": "name"}, {"api_name": "karl.views.forms.validators.FolderNameAvailable", "line_number": 374, "usage_type": "call"}, {"api_name": "karl.views.forms.validators", "line_number": 374, "usage_type": "name"}, {"api_name": "karl.views.forms.validators.WikiTitleAvailable", "line_number": 377, "usage_type": "call"}, {"api_name": "karl.views.forms.validators", "line_number": 377, "usage_type": "name"}, {"api_name": "karl.views.tags.get_tags_client_data", "line_number": 391, "usage_type": "call"}, {"api_name": "formish.Input", "line_number": 393, "usage_type": "call"}, {"api_name": "karl.views.forms.widgets.TagsEditWidget", "line_number": 394, "usage_type": "call"}, {"api_name": "karl.views.forms.widgets", "line_number": 394, "usage_type": "name"}, {"api_name": "karl.views.forms.widgets.RichTextWidget", "line_number": 395, "usage_type": "call"}, {"api_name": "karl.views.forms.widgets", "line_number": 395, "usage_type": "name"}, {"api_name": "formish.RadioChoice", "line_number": 401, "usage_type": "call"}, {"api_name": "karl.utilities.lock.is_locked", "line_number": 407, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 407, "usage_type": "name"}, {"api_name": "karl.utilities.lock.lock", "line_number": 408, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 408, "usage_type": "name"}, {"api_name": "karl.views.api.TemplateAPI", "line_number": 411, "usage_type": "call"}, {"api_name": "karl.utilities.lock.lock_info_for_view", "line_number": 419, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 419, "usage_type": "name"}, {"api_name": "karl.utilities.lock.owns_lock", "line_number": 423, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 423, "usage_type": "name"}, {"api_name": "karl.utilities.lock.clear", "line_number": 424, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 424, "usage_type": "name"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 425, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 425, "usage_type": "call"}, {"api_name": "karl.utilities.lock.owns_lock", "line_number": 428, 
"usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 428, "usage_type": "name"}, {"api_name": "karl.utilities.lock.clear", "line_number": 429, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 429, "usage_type": "name"}, {"api_name": "zope.component.event.objectEventNotify", "line_number": 435, "usage_type": "call"}, {"api_name": "karl.events.ObjectWillBeModifiedEvent", "line_number": 435, "usage_type": "call"}, {"api_name": "karl.content.views.utils.extract_description", "line_number": 442, "usage_type": "call"}, {"api_name": "karl.views.tags.set_tags", "line_number": 448, "usage_type": "call"}, {"api_name": "pyramid.security.authenticated_userid", "line_number": 451, "usage_type": "call"}, {"api_name": "zope.component.event.objectEventNotify", "line_number": 452, "usage_type": "call"}, {"api_name": "karl.events.ObjectModifiedEvent", "line_number": 452, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 454, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 456, "usage_type": "call"}, {"api_name": "pyramid.security.authenticated_userid", "line_number": 462, "usage_type": "call"}, {"api_name": "karl.utilities.lock.owns_lock", "line_number": 463, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 463, "usage_type": "name"}, {"api_name": "karl.utilities.lock.clear", "line_number": 464, "usage_type": "call"}, {"api_name": "karl.utilities.lock", "line_number": 464, "usage_type": "name"}, {"api_name": "pyramid.httpexceptions.HTTPOk", "line_number": 465, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 466, "usage_type": "call"}, {"api_name": "pyramid.url.resource_url", "line_number": 466, "usage_type": "call"}]} +{"seq_id": "4166164294", "text": "# coding: utf-8\n# 此插件只适用于maya2020及以上版本\nimport maya.api.OpenMaya as om\nimport maya.api.OpenMayaUI as omui\nimport maya.cmds as cmds\nimport os\n\ndef maya_useNewAPI():\n \"\"\" 告知maya,使用的是maya api 2.0 \"\"\"\n pass\n\nclass JointCreateContext(omui.MPxContext):\n\n TITLE = \"JointCreate Context\"\n\n HELP_TEXT = [\"Select first joint location\",\n \"Select second joint location\",\n \"Select final joint location\",\n \"Press Enter to complete\"]\n\n def __init__(self):\n super(JointCreateContext, self).__init__()\n\n self.setTitleString(JointCreateContext.TITLE)\n plugin_dir_path = os.path.dirname(cmds.pluginInfo(\"joint_create_context.py\",p=True,q=True)) \n self.setImage(plugin_dir_path + \"/icons/icon_windows.png\", omui.MPxContext.kImage1) # 设置工具的图标\n\n self.state = 0 # 判断通过工具选择的对象的个数\n self.context_selection = om.MSelectionList() # 通过工具选择的对象的列表\n \n def helpStateHasChanged(self, event):\n self.update_help_string()\n \n def update_help_string(self):\n self.setHelpString(JointCreateContext.HELP_TEXT[self.state])\n\n def toolOnSetup(self, event):\n \"\"\" 工具加载时执行 \"\"\"\n om.MGlobal.selectCommand(om.MSelectionList()) # 确保工具刚开始使用时是一个健康的选择状态\n self.reset_state()\n\n def toolOffCleanup(self):\n \"\"\" 取消工具加载时执行(在使用工具的同时创建模型maya会自动先取消加载工具再自动加载工具) \"\"\"\n self.reset_state()\n \n def doRelease(self, event, draw_manager, frame_context):\n \"\"\" 松开键时执行 \"\"\"\n if self.state >= 0 and self.state < 3:\n om.MGlobal.selectFromScreen(event.position[0], event.position[1],event.position[0],event.position[1],om.MGlobal.kReplaceList)\n\n active_selection = om.MGlobal.getActiveSelectionList() # 获取当前选择的物体\n if active_selection.length() == 1:\n self.context_selection.merge(active_selection) # 
merge() is used so that duplicate objects never appear in context_selection\n\n om.MGlobal.setActiveSelectionList(self.context_selection) # update the active selection\n\n self.update_state()\n \n def completeAction(self):\n \"\"\" Executed when the Enter key is pressed \"\"\"\n selection_count = self.context_selection.length()\n if selection_count == 3:\n om.MGlobal.setActiveSelectionList(om.MSelectionList())\n\n for i in range(selection_count):\n transform_fn = om.MFnTransform(self.context_selection.getDependNode(i))\n\n cmds.joint(position = transform_fn.translation(om.MSpace.kTransform))\n cmds.delete(transform_fn.name())\n \n cmds.select(clear=True)\n self.reset_state()\n \n else:\n om.MGlobal.displayError(\"Three objects must be selected\")\n \n def deleteAction(self):\n \"\"\" Executed when the Delete or Backspace key is pressed \"\"\"\n selection_count = self.context_selection.length()\n if selection_count > 0:\n self.context_selection.remove(selection_count - 1)\n\n om.MGlobal.setActiveSelectionList(self.context_selection)\n\n self.update_state()\n \n def abortAction(self):\n \"\"\" Executed when the Esc key is pressed \"\"\"\n self.reset_state()\n\n def update_state(self):\n \"\"\" Update the selection state \"\"\"\n self.state = self.context_selection.length()\n\n self.update_help_string()\n \n def reset_state(self):\n \"\"\" Reset the selection state \"\"\"\n om.MGlobal.setActiveSelectionList(om.MSelectionList())\n\n self.context_selection.clear()\n self.update_state()\n\nclass JointCreateContextCmd(omui.MPxContextCommand):\n\n COMMAND_NAME = \"rcJointCreateCtx\"\n\n def __init__(self):\n super(JointCreateContextCmd, self).__init__()\n \n def makeObj(self):\n return JointCreateContext()\n \n @classmethod\n def creator(cls):\n return JointCreateContextCmd()\n\ndef initializePlugin(plugin): \n \"\"\" Called when the plug-in is loaded \"\"\"\n vendor = \"RuiChen\" # plug-in author\n version = \"1.0.0\" # plug-in version\n\n plugin_fn = om.MFnPlugin(plugin, vendor, version) # define the plug-in\n try:\n plugin_fn.registerContextCommand(JointCreateContextCmd.COMMAND_NAME, JointCreateContextCmd.creator)\n except:\n om.MGlobal.displayError(\"Failed to register context command: {0}\".format(JointCreateContextCmd.COMMAND_NAME))\n\ndef uninitializePlugin(plugin):\n \"\"\" Called when the plug-in is unloaded \"\"\"\n plugin_fn = om.MFnPlugin(plugin)\n try:\n plugin_fn.deregisterContextCommand(JointCreateContextCmd.COMMAND_NAME)\n except:\n om.MGlobal.displayError(\"Failed to deregister context command: {0}\".format(JointCreateContextCmd.COMMAND_NAME))\n\nif __name__ == '__main__':\n \"\"\" After registering, this is how to use the tool from the Maya script editor \"\"\"\n cmds.file(new=True,force=True)\n plugin_dir_path = os.path.dirname(cmds.pluginInfo(\"joint_create_context.py\",p=True,q=True)) \n test_file_path = plugin_dir_path + \"/test_scene/joint_create_context_test.ma\"\n\n plugin_name = \"joint_create_context.py\" # plug-in file name\n\n # unload the plug-in first if it is already loaded\n cmds.evalDeferred('if cmds.pluginInfo(\"{0}\", q=True, loaded=True): cmds.unloadPlugin(\"{0}\")'.format(plugin_name))\n # load the plug-in if it is not loaded yet\n cmds.evalDeferred('if not cmds.pluginInfo(\"{0}\", q=True, loaded=True): cmds.loadPlugin(\"{0}\")'.format(plugin_name))\n cmds.evalDeferred('cmds.file(test_file_path,o=True,f=True)')\n cmds.evalDeferred('context = cmds.rcJointCreateCtx(); cmds.setToolTo(context)')\n", "repo_name": "violet-chen/mayaPythonAPI_Zurbrigg", "sub_path": "joint_create_context.py", "file_name": "joint_create_context.py", "file_ext": "py", "file_size_in_byte": 5823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "maya.api.OpenMayaUI.MPxContext", "line_number": 12, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMayaUI", "line_number": 12, 
"usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "maya.cmds.pluginInfo", "line_number": 25, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 25, "usage_type": "name"}, {"api_name": "maya.api.OpenMayaUI.MPxContext", "line_number": 26, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMayaUI", "line_number": 26, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MSelectionList", "line_number": 29, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 29, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.selectCommand", "line_number": 39, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 39, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 39, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MSelectionList", "line_number": 39, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal.selectFromScreen", "line_number": 49, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 49, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 49, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.getActiveSelectionList", "line_number": 51, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 51, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 51, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.setActiveSelectionList", "line_number": 55, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 55, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 55, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.setActiveSelectionList", "line_number": 63, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 63, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 63, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MSelectionList", "line_number": 63, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MFnTransform", "line_number": 66, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 66, "usage_type": "name"}, {"api_name": "maya.cmds.joint", "line_number": 68, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 68, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MSpace", "line_number": 68, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 68, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 69, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 69, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 71, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 71, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.displayError", "line_number": 75, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 75, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 75, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.setActiveSelectionList", "line_number": 83, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 83, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 83, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.setActiveSelectionList", "line_number": 
99, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 99, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 99, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MSelectionList", "line_number": 99, "usage_type": "call"}, {"api_name": "maya.api.OpenMayaUI.MPxContextCommand", "line_number": 104, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMayaUI", "line_number": 104, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MFnPlugin", "line_number": 123, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 123, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.displayError", "line_number": 127, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 127, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 127, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MFnPlugin", "line_number": 131, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 131, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MGlobal.displayError", "line_number": 135, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya.MGlobal", "line_number": 135, "usage_type": "attribute"}, {"api_name": "maya.api.OpenMaya", "line_number": 135, "usage_type": "name"}, {"api_name": "maya.cmds.file", "line_number": 139, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 139, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "maya.cmds.pluginInfo", "line_number": 140, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 140, "usage_type": "name"}, {"api_name": "maya.cmds.evalDeferred", "line_number": 146, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 146, "usage_type": "name"}, {"api_name": "maya.cmds.evalDeferred", "line_number": 148, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 148, "usage_type": "name"}, {"api_name": "maya.cmds.evalDeferred", "line_number": 149, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 149, "usage_type": "name"}, {"api_name": "maya.cmds.evalDeferred", "line_number": 150, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 150, "usage_type": "name"}]} +{"seq_id": "10334883057", "text": "import time\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom .model.STLSTMModel import STLSTMModel\nfrom .utils.STLSTMDataset import STLSTMDataset\nfrom ...utils.dataset_util import DatasetInputType, normalise_skeleton_3d_batch\nfrom ...utils.model_name_generator import ModelNameGenerator\nfrom ...utils.training_utils import Optimizer, print_train_results, validate_model, save_diagram_common, \\\n save_model_common, save_loss_common, get_training_batch_accuracy\n\n\ndef train(classes, training_data, training_labels, validation_data, validation_labels, analysed_kpts_description,\n dropout=0.5, epoch_nb=5000, batch_size=128, hidden_size=128, learning_rate=0.0001,\n weight_decay=0, momentum=0.9, val_every=5, print_every=50, lbd=0.5, steps=32, split=20, input_type=DatasetInputType.SPLIT,\n optimizer_type=Optimizer.RMSPROP, results_path='results', model_name_suffix='', save_loss=True, save_diagram=True,\n save_model=True, save_model_for_inference=False, add_random_rotation_y=False, use_two_layers=True, use_tau=False, use_bias=True,\n is_3d=True, show_diagram=True, print_results=True, use_normalization=True, 
add_timestamp=True):\n method_name = 'st_lstm'\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n input_size = 3 if is_3d else 2\n joints_count = 12\n\n if input_type == DatasetInputType.TREE:\n joints_count = 29\n\n st_lstm_model = STLSTMModel(input_size, joints_count, hidden_size, len(classes), dropout, use_tau=use_tau,\n bias=use_bias, lbd=lbd, use_two_layers=use_two_layers).to(device)\n\n criterion = nn.NLLLoss()\n\n if optimizer_type == Optimizer.RMSPROP:\n optimizer = optim.RMSprop(st_lstm_model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)\n elif optimizer_type == Optimizer.SGD:\n optimizer = optim.SGD(st_lstm_model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)\n elif optimizer_type == Optimizer.ADAM:\n optimizer = optim.Adam(st_lstm_model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n else:\n raise Exception('Unknown optimizer')\n\n all_train_losses = []\n all_val_losses = []\n\n all_batch_training_accuracies = []\n all_batch_val_accuracies = []\n\n start_time = time.time()\n epoch = 0\n\n if use_normalization:\n training_data = normalise_skeleton_3d_batch(training_data, analysed_kpts_description['left_hip'],\n analysed_kpts_description['right_hip'])\n validation_data = normalise_skeleton_3d_batch(validation_data, analysed_kpts_description['left_hip'],\n analysed_kpts_description['right_hip'])\n\n train_data_loader = STLSTMDataset(training_data, training_labels, batch_size, analysed_kpts_description, split=split,\n steps=steps, input_type=input_type, add_random_rotation_y=add_random_rotation_y)\n validation_data_loader = STLSTMDataset(validation_data, validation_labels, batch_size, analysed_kpts_description,\n split=split, steps=steps, input_type=input_type)\n\n for epoch in range(epoch_nb):\n data, train_y = next(iter(train_data_loader))\n tensor_train_y = torch.from_numpy(train_y).to(device)\n\n optimizer.zero_grad()\n\n tensor_train_x = torch.tensor(data, dtype=torch.float, device=device)\n\n output = st_lstm_model(tensor_train_x)\n\n loss = criterion(output, tensor_train_y)\n\n loss.backward()\n\n optimizer.step()\n\n all_train_losses.append(loss.item())\n\n if epoch % print_every == 0 and epoch > 0 and print_results:\n print_train_results(classes, output, tensor_train_y, epoch, epoch_nb, start_time, loss, batch_size, print_every)\n\n if epoch % val_every == 0 and epoch > 0:\n all_batch_training_accuracies.append(get_training_batch_accuracy(classes, output, tensor_train_y, batch_size)[1])\n with torch.no_grad():\n data_val, val_y = next(iter(validation_data_loader))\n tensor_val_y = torch.from_numpy(val_y).to(device)\n tensor_val_x = torch.tensor(data_val, dtype=torch.float, device=device)\n output_val = st_lstm_model(tensor_val_x)\n loss_val = criterion(output_val, tensor_val_y)\n val_loss, batch_acc = validate_model(tensor_val_y, output_val, classes, epoch, epoch_nb, print_every,\n start_time, batch_size, loss_val, print_results=print_results)\n all_val_losses.append(val_loss)\n all_batch_val_accuracies.append(batch_acc)\n\n model_name = ModelNameGenerator(method_name, model_name_suffix, add_timestamp) \\\n .add_epoch_number(epoch_nb) \\\n .add_batch_size(batch_size) \\\n .add_learning_rate(learning_rate) \\\n .add_optimizer_name(optimizer_type.name) \\\n .add_hidden_size(hidden_size) \\\n .add_input_type(input_type.name) \\\n .add_dropout(dropout) \\\n .add_momentum(momentum) \\\n .add_weight_decay(weight_decay) \\\n .add_split(split) \\\n .add_steps(steps) \\\n 
.add_lambda(lbd) \\\n .add_random_rotation_y(add_random_rotation_y) \\\n .add_is_bias_used(use_bias) \\\n .add_is_tau(use_tau) \\\n .add_is_two_layers_used(use_two_layers) \\\n .add_is_3d(is_3d) \\\n .add_is_normalization_used(use_normalization) \\\n .generate()\n\n if save_model:\n save_model_common(st_lstm_model, optimizer, epoch, val_every, all_train_losses, all_val_losses,\n save_model_for_inference, results_path, model_name)\n\n if save_diagram:\n save_diagram_common(all_train_losses, all_val_losses, model_name, val_every, epoch_nb, results_path,\n all_batch_training_accuracies, all_batch_val_accuracies, show_diagram=show_diagram)\n\n if save_loss:\n save_loss_common(all_train_losses, all_val_losses, model_name, results_path, all_batch_training_accuracies,\n all_batch_val_accuracies)\n\n return st_lstm_model\n", "repo_name": "HLJ1997/HumanActionRecognition", "sub_path": "har/impl/st_lstm/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 6268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "97", "api": [{"api_name": "utils.dataset_util.DatasetInputType.SPLIT", "line_number": 17, "usage_type": "attribute"}, {"api_name": "utils.dataset_util.DatasetInputType", "line_number": 17, "usage_type": "name"}, {"api_name": "utils.training_utils.Optimizer.RMSPROP", "line_number": 18, "usage_type": "attribute"}, {"api_name": "utils.training_utils.Optimizer", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 22, "usage_type": "attribute"}, {"api_name": "utils.dataset_util.DatasetInputType.TREE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "utils.dataset_util.DatasetInputType", "line_number": 27, "usage_type": "name"}, {"api_name": "model.STLSTMModel.STLSTMModel", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.NLLLoss", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.training_utils.Optimizer.RMSPROP", "line_number": 35, "usage_type": "attribute"}, {"api_name": "utils.training_utils.Optimizer", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.optim.RMSprop", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 36, "usage_type": "name"}, {"api_name": "utils.training_utils.Optimizer.SGD", "line_number": 37, "usage_type": "attribute"}, {"api_name": "utils.training_utils.Optimizer", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.training_utils.Optimizer.ADAM", "line_number": 39, "usage_type": "attribute"}, {"api_name": "utils.training_utils.Optimizer", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 40, "usage_type": "name"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.dataset_util.normalise_skeleton_3d_batch", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.dataset_util.normalise_skeleton_3d_batch", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.STLSTMDataset.STLSTMDataset", "line_number": 59, "usage_type": "call"}, {"api_name": 
"utils.STLSTMDataset.STLSTMDataset", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 70, "usage_type": "attribute"}, {"api_name": "utils.training_utils.print_train_results", "line_number": 83, "usage_type": "call"}, {"api_name": "utils.training_utils.get_training_batch_accuracy", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 90, "usage_type": "attribute"}, {"api_name": "utils.training_utils.validate_model", "line_number": 93, "usage_type": "call"}, {"api_name": "utils.model_name_generator.ModelNameGenerator", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.training_utils.save_model_common", "line_number": 120, "usage_type": "call"}, {"api_name": "utils.training_utils.save_diagram_common", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.training_utils.save_loss_common", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "34282720900", "text": "from typing import Optional\n\nfrom . import blender_actions\nfrom . import blender_definitions\n\nfrom codetocad.interfaces import SceneInterface, EntityInterface\nfrom codetocad.codetocad_types import *\nfrom codetocad.utilities import *\n\nfrom .part import Part\nfrom .entity import Entity\n\n\nclass Scene(SceneInterface):\n # Blender's default Scene name is \"Scene\"\n name: str = \"Scene\"\n description: Optional[str] = None\n\n def __init__(self, name: Optional[str] = None, description: Optional[str] = None):\n self.name = name or self.name\n self.description = description\n\n @staticmethod\n def default() -> \"Scene\":\n return Scene()\n\n def create(self):\n raise NotImplementedError()\n return self\n\n def delete(self):\n raise NotImplementedError()\n return self\n\n def get_selected_entity(self) -> \"EntityInterface\":\n return Entity(blender_actions.get_selected_object_name())\n\n def export(\n self,\n file_path: str,\n entities: list[EntityOrItsName],\n overwrite: bool = True,\n scale: float = 1.0,\n ):\n for entity in entities:\n part = entity\n if isinstance(part, str):\n part = Part(part)\n part.export(file_path, overwrite, scale)\n return self\n\n def set_default_unit(self, unit: LengthUnitOrItsName):\n if isinstance(unit, str):\n unit = LengthUnit.from_string(unit)\n\n blenderUnit = blender_definitions.BlenderLength.from_length_unit(unit)\n\n blender_actions.set_default_unit(blenderUnit, self.name)\n return self\n\n def create_group(self, name: str):\n blender_actions.create_collection(name, self.name)\n return self\n\n def delete_group(self, name: str, remove_children: bool):\n blender_actions.remove_collection(\n name=name, scene_name=self.name, remove_children=remove_children\n )\n return self\n\n def remove_from_group(self, entity_name: str, group_name: str):\n if isinstance(entity_name, Entity):\n entity_name = entity_name.name\n\n blender_actions.remove_object_from_collection(\n existing_object_name=entity_name,\n collection_name=group_name,\n scene_name=self.name,\n )\n return self\n\n def assign_to_group(\n self,\n entities: list[EntityOrItsName],\n group_name: str,\n remove_from_other_groups: Optional[bool] = True,\n ):\n for entity in entities:\n entity_name = 
entity\n if isinstance(entity_name, EntityInterface):\n entity_name = entity_name.name\n\n blender_actions.assign_object_to_collection(\n entity_name, group_name, self.name, remove_from_other_groups if remove_from_other_groups is not None else True\n )\n\n return self\n\n def set_visible(self, entities: list[EntityOrItsName], is_visible: bool):\n for entity in entities:\n if isinstance(entity, EntityInterface):\n entity = entity.name\n\n blender_actions.set_object_visibility(entity, is_visible)\n\n return self\n\n def set_background_image(\n self,\n file_path: str,\n location_x: Optional[DimensionOrItsFloatOrStringValue] = 0,\n location_y: Optional[DimensionOrItsFloatOrStringValue] = 0,\n ):\n absoluteFilePath = get_absolute_filepath(file_path)\n\n blender_actions.add_hdr_texture(self.name, absoluteFilePath)\n\n x = blender_definitions.BlenderLength.convert_dimension_to_blender_unit(\n Dimension.from_string(location_x or 0)\n ).value\n y = blender_definitions.BlenderLength.convert_dimension_to_blender_unit(\n Dimension.from_string(location_y or 0)\n ).value\n\n blender_actions.set_background_location(self.name, x, y)\n\n return self\n", "repo_name": "CodeToCAD/CodeToCAD", "sub_path": "providers/blender/blender_provider/scene.py", "file_name": "scene.py", "file_ext": "py", "file_size_in_byte": 3844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "94", "api": [{"api_name": "codetocad.interfaces.SceneInterface", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "entity.Entity", "line_number": 36, "usage_type": "call"}, {"api_name": "part.Part", "line_number": 48, "usage_type": "call"}, {"api_name": "part.export", "line_number": 49, "usage_type": "call"}, {"api_name": "entity.Entity", "line_number": 72, "usage_type": "argument"}, {"api_name": "typing.Optional", "line_number": 86, "usage_type": "name"}, {"api_name": "codetocad.interfaces.EntityInterface", "line_number": 90, "usage_type": "argument"}, {"api_name": "codetocad.interfaces.EntityInterface", "line_number": 101, "usage_type": "argument"}, {"api_name": "entity.name", "line_number": 102, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 111, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "26851824630", "text": "'''\r\nQuestion 3.1 Skeleton Code\r\n\r\nHere you should implement and evaluate the MLP classifier.\r\n'''\r\n\r\nimport data\r\nimport numpy as np\r\n# Import pyplot - plt.imshow is useful!\r\nimport matplotlib.pyplot as plt\r\nfrom statistics import mode, mean\r\nfrom sklearn.model_selection import KFold\r\nfrom scipy import stats as s\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nfrom sklearn.exceptions import ConvergenceWarning\r\n\r\nclass MLP(object):\r\n '''\r\n Multi-layer perceptron classifier\r\n '''\r\n\r\n def __init__(self, train_data, train_labels):\r\n self.train_data = train_data\r\n self.train_norm = (self.train_data**2).sum(axis=1).reshape(-1,1)\r\n self.train_labels = train_labels\r\n\r\n def mlpclassifier(self, solver, activation, learning_rate, layer_size):\r\n clf = MLPClassifier(solver=solver, activation=activation, learning_rate_init=learning_rate, hidden_layer_sizes=layer_size)\r\n clf.fit(self.train_data, self.train_labels)\r\n return clf\r\n\r\n\r\ndef cross_validation(train_data, 
train_labels):\r\n '''\r\n Perform 10-fold cross validation over solver, activation, learning rate\r\n and hidden layer size to find the best MLP hyperparameters\r\n '''\r\n best_solver = \"\"\r\n best_activation = \"\"\r\n best_learning_rate = 0\r\n best_layer_size = 0\r\n best_accuracy = 0\r\n solvers = [\"sgd\"]\r\n activations = [\"logistic\", \"tanh\"]\r\n alphas = [0.01]\r\n hidden_layer = [(100,)]\r\n for solver in solvers:\r\n for activation in activations:\r\n for alpha in alphas:\r\n for hidden_layer_size in hidden_layer:\r\n accuracy = 0\r\n kf = KFold(10, shuffle=True, random_state=89)\r\n for train_index, test_index in kf.split(\r\n range(len(train_data))):\r\n x_train, x_val, y_train, y_val = train_data[train_index], train_data[test_index], train_labels[train_index], train_labels[test_index]\r\n mlp = MLP(x_train, y_train).mlpclassifier(solver, activation, alpha, hidden_layer_size)\r\n accuracy += mlp.score(x_val, y_val)\r\n score = accuracy/10 # mean validation accuracy over the 10 folds\r\n print(\"Solver: \", solver, \" Activation: \", activation, \" Learning Rate: \", alpha, \" Layer Size: \", hidden_layer_size, \" has cross val accuracy: \", score)\r\n if score > best_accuracy:\r\n best_accuracy = score\r\n best_solver = solver\r\n best_activation = activation\r\n best_learning_rate = alpha\r\n best_layer_size = hidden_layer_size\r\n return best_solver, best_activation, best_learning_rate, best_layer_size\r\n\r\n\r\ndef classification_accuracy(clf, eval_data, eval_labels):\r\n '''\r\n Evaluate the classification accuracy of 'clf' on the given 'eval_data'\r\n using the labels\r\n '''\r\n\r\n return clf.score(eval_data, eval_labels)\r\n\r\n\r\ndef main():\r\n train_data, train_labels, test_data, test_labels = data.load_all_data('data')\r\n mlp = MLP(train_data, train_labels)\r\n solver, activation, learning_rate, layer_size = cross_validation(train_data, train_labels)\r\n clf = mlp.mlpclassifier(solver, activation, learning_rate, layer_size)\r\n print(\"Solver: \", solver, \" Activation: \", activation, \" Learning Rate: \",\r\n learning_rate, \" Layer Size: \", layer_size)\r\n print(clf.predict(test_data))\r\n print(\"Test accuracy: \",\r\n classification_accuracy(clf, test_data, test_labels))\r\n print(\"Train accuracy: \",\r\n classification_accuracy(clf, train_data, train_labels))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "VaskarNath/ml-projects", "sub_path": "Assignment 3/MLPClassifier.py", "file_name": "MLPClassifier.py", "file_ext": "py", "file_size_in_byte": 3866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 57, "usage_type": "call"}, {"api_name": "data.load_all_data", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "24252488383", "text": "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nfrom obspy.core.util import Enum\n\n\nOriginUncertaintyDescription = Enum([\n \"horizontal uncertainty\",\n \"uncertainty ellipse\",\n \"confidence ellipsoid\",\n])\n\nAmplitudeCategory = Enum([\n \"point\",\n \"mean\",\n \"duration\",\n \"period\",\n \"integral\",\n \"other\",\n])\n\nOriginDepthType = Enum([\n \"from location\",\n \"from moment tensor 
inversion\",\n \"from modeling of broad-band P waveforms\",\n \"constrained by depth phases\",\n \"constrained by direct phases\",\n \"constrained by depth and direct phases\",\n \"operator assigned\",\n \"other\",\n])\n\nOriginType = Enum([\n \"hypocenter\",\n \"centroid\",\n \"amplitude\",\n \"macroseismic\",\n \"rupture start\",\n \"rupture end\",\n])\n\nMTInversionType = Enum([\n \"general\",\n \"zero trace\",\n \"double couple\",\n])\n\nEvaluationMode = Enum([\n \"manual\",\n \"automatic\",\n])\n\nEvaluationStatus = Enum([\n \"preliminary\",\n \"confirmed\",\n \"reviewed\",\n \"final\",\n \"rejected\",\n])\n\nPickOnset = Enum([\n \"emergent\",\n \"impulsive\",\n \"questionable\",\n])\n\nDataUsedWaveType = Enum([\n \"P waves\",\n \"body waves\",\n \"surface waves\",\n \"mantle waves\",\n \"combined\",\n \"unknown\",\n])\n\nAmplitudeUnit = Enum([\n \"m\",\n \"s\",\n \"m/s\",\n \"m/(s*s)\",\n \"m*s\",\n \"dimensionless\",\n \"other\",\n])\n\nEventDescriptionType = Enum([\n \"felt report\",\n \"Flinn-Engdahl region\",\n \"local time\",\n \"tectonic summary\",\n \"nearest cities\",\n \"earthquake name\",\n \"region name\",\n])\n\nMomentTensorCategory = Enum([\n \"teleseismic\",\n \"regional\",\n])\n\nEventType = Enum([\n \"not existing\",\n \"not reported\",\n \"earthquake\",\n \"anthropogenic event\",\n \"collapse\",\n \"cavity collapse\",\n \"mine collapse\",\n \"building collapse\",\n \"explosion\",\n \"accidental explosion\",\n \"chemical explosion\",\n \"controlled explosion\",\n \"experimental explosion\",\n \"industrial explosion\",\n \"mining explosion\",\n \"quarry blast\",\n \"road cut\",\n \"blasting levee\",\n \"nuclear explosion\",\n \"induced or triggered event\",\n \"rock burst\",\n \"reservoir loading\",\n \"fluid injection\",\n \"fluid extraction\",\n \"crash\",\n \"plane crash\",\n \"train crash\",\n \"boat crash\",\n \"other event\",\n \"atmospheric event\",\n \"sonic boom\",\n \"sonic blast\",\n \"acoustic noise\",\n \"thunder\",\n \"avalanche\",\n \"snow avalanche\",\n \"debris avalanche\",\n \"hydroacoustic event\",\n \"ice quake\",\n \"slide\",\n \"landslide\",\n \"rockslide\",\n \"meteorite\",\n \"volcanic eruption\",\n], replace={'other': 'other event'})\n\nEventTypeCertainty = Enum([\n \"known\",\n \"suspected\",\n])\n\nSourceTimeFunctionType = Enum([\n \"box car\",\n \"triangle\",\n \"trapezoid\",\n \"unknown\",\n])\n\nPickPolarity = Enum([\n \"positive\",\n \"negative\",\n \"undecidable\",\n])\n", "repo_name": "turnbulm/obspy-kelunji-classic", "sub_path": "obspy/core/event_header.py", "file_name": "event_header.py", "file_ext": "py", "file_size_in_byte": 2919, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "obspy.core.util.Enum", "line_number": 8, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 14, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 23, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 34, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 43, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 49, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 54, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 62, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 68, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 77, "usage_type": "call"}, {"api_name": 
"obspy.core.util.Enum", "line_number": 87, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 97, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 102, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 149, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 154, "usage_type": "call"}, {"api_name": "obspy.core.util.Enum", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "74395976309", "text": "import configparser\nfrom datetime import datetime\nfrom typing import Tuple\nimport atexit\nimport time\n\nimport Adafruit_DHT\nimport grovepi\n\n\ndef get_humidity_and_temperature(pin: int) -> Tuple[datetime, float, float]:\n timestamp = datetime.now()\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, pin)\n return timestamp, humidity, temperature\n\n\ndef get_noise(pin: int) -> Tuple[datetime, int]:\n grovepi.pinMode(pin, 'INPUT')\n timestamp = datetime.now()\n values = []\n samples = 5\n for i in range(samples):\n value = grovepi.analogRead(pin)\n values.append(value)\n time.sleep(0.1)\n avg_value = int(round(sum(values) / samples))\n return timestamp, avg_value\n\n\ndef get_dust() -> Tuple[datetime, int]:\n atexit.register(grovepi.dust_sensor_dis)\n grovepi.dust_sensor_en()\n attempts = 100\n for _ in range(attempts):\n timestamp = datetime.now()\n new_val, low_pulse_occupancy = grovepi.dustSensorRead()\n if new_val:\n return timestamp, low_pulse_occupancy\n time.sleep(3)\n else:\n raise ValueError('Cannot read values from dust sensor')\n\n\ndef read_sensors_data(config: configparser.ConfigParser) -> dict:\n data = {}\n prefix = config.get('carbon', 'prefix')\n\n # Humidity and Temperature\n humidity_and_temperature_pin = int(\n config.get('sensors', 'humidity_and_temperature_pin'))\n timestamp, humidity, temperature = get_humidity_and_temperature(\n humidity_and_temperature_pin)\n data[prefix + '.temperature'] = (timestamp, temperature)\n data[prefix + '.humidity'] = (timestamp, humidity)\n\n # Noise\n sound_pin = int(\n config.get('sensors', 'sound_pin'))\n timestamp, noise = get_noise(sound_pin)\n data[prefix + '.noise'] = (timestamp, noise)\n\n # Dust\n try:\n timestamp, dust = get_dust()\n data[prefix + '.dust'] = (timestamp, dust)\n except ValueError:\n pass\n\n return data\n\n\ndef self_check():\n config = configparser.ConfigParser()\n config.read('config.ini')\n data = read_sensors_data(config)\n print(data)\n\n\nif __name__ == '__main__':\n self_check()\n", "repo_name": "citizen-stig/home-sensors", "sub_path": "sensors.py", "file_name": "sensors.py", "file_ext": "py", "file_size_in_byte": 2158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "Adafruit_DHT.read_retry", "line_number": 13, "usage_type": "call"}, {"api_name": "Adafruit_DHT.AM2302", "line_number": 13, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "grovepi.pinMode", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "grovepi.analogRead", "line_number": 23, "usage_type": 
"call"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "atexit.register", "line_number": 31, "usage_type": "call"}, {"api_name": "grovepi.dust_sensor_dis", "line_number": 31, "usage_type": "attribute"}, {"api_name": "grovepi.dust_sensor_en", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "grovepi.dustSensorRead", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 44, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "69968176318", "text": "import os\nimport sys\nimport dataset\nimport torch\nimport torch.nn as nn\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset\nimport matplotlib.pyplot as plt\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\nbatch_size = 64\n\n\n\n\n################################ Euclidean Transformer ######################################\nfrom modeling_euct import EuclideanTransformerPreTrainedModel, EuclideanTransformerModel\n########################### End of EuclideanTransformer #########\n\nimport transformers\nimport random\nfrom transformers import XLNetConfig\nfrom tqdm import tqdm\nfrom transformers.optimization import AdamW\n\nclass Classifier(EuclideanTransformerPreTrainedModel):\n def __init__(self, config: XLNetConfig):\n super().__init__(config)\n self.config = config\n self.model = EuclideanTransformerModel(config=config)\n self.output_layer = nn.Linear(config.d_model, 10)\n self.mask_reconstruction = nn.Linear(config.d_model, 3 * 8 * 8)\n\n self.init_weights()\n #####################################################################################\n\n def forward(self, x, mask_info=None):\n ########################################Your Code####################################\n batch_size, C, H, W = x.shape\n pixel_embeddings, semantic_embeddings = self.model(x)\n semantic_embeddings = pixel_embeddings.mean(dim=1).mean(dim=1)\n if mask_info is None:\n x = self.output_layer(semantic_embeddings.reshape(batch_size, self.config.d_model))\n else:\n return torch.zeros((batch_size,), device=x.device)\n \n #####################################################################################\n return x, None, None\n\nconfig = XLNetConfig.from_pretrained(\"xlnet-base-cased\")\nFastConv = Classifier\nrandom.seed(0)\ntorch.random.manual_seed(0)\nmodel = FastConv(config).cuda()\nloss_fn = torch.nn.CrossEntropyLoss()\nfrom transformers import get_linear_schedule_with_warmup, get_constant_schedule_with_warmup, get_constant_schedule\noptimizer = AdamW(lr=5e-5, weight_decay=0.02,\n eps=1e-8, params=model.parameters())\n# lr_scheduler = get_constant_schedule(opt)\nlr_scheduler = get_linear_schedule_with_warmup(optimizer, num_training_steps=1000000, num_warmup_steps=100)\n\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return 
param_group['lr']\n\nclass PatchMaskWrapperDataset(object):\n def __init__(self, base_dataset):\n self.data = base_dataset\n\n def __getitem__(self, item):\n inputs, labels = self.data[item]\n x, y = random.randint(0, 3), random.randint(0, 3)\n inputs_ = inputs + 0.0\n\n mask_tgt = inputs[:, 8 * x: 8 * x + 8, 8 * y: 8 * y + 8]\n inputs_[:, 8 * x: 8 * x + 8, 8 * y: 8 * y + 8] = 0.0\n return inputs, labels, inputs_, x, y, mask_tgt\n\n def __len__(self):\n return self.data.__len__()\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\ntraining_loader = torch.utils.data.DataLoader(PatchMaskWrapperDataset(trainset), batch_size=batch_size,\n shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\nvalidation_loader = torch.utils.data.DataLoader(PatchMaskWrapperDataset(testset), batch_size=batch_size,\n shuffle=False, num_workers=2)\n\n\n\n\ndef train_one_epoch(epoch_index, mode=\"both\"):\n running_loss = 0.\n running_recons_loss = 0.\n last_loss = 0.\n last_recons_loss = 0.\n len_samples = 0\n # Here, we use enumerate(training_loader) instead of\n # iter(training_loader) so that we can track the batch\n # index and do some intra-epoch reporting\n iterator = tqdm((training_loader))\n for i, data in enumerate(iterator):\n # Every data instance is an input + label pair\n inputs, labels, inputs_masked, x, y, mask_tgt = data\n inputs = inputs.cuda()\n labels = labels.cuda()\n inputs_masked = inputs_masked.cuda()\n x = x.cuda()\n y = y.cuda()\n mask_tgt = mask_tgt.cuda()\n\n # inputs = inputs.permute(0,3,1,2)\n # inputs_masked = inputs_masked.permute(0,3,1,2)\n len_samples += inputs.shape[0]\n # Zero your gradients for every batch!\n optimizer.zero_grad()\n \n # Make predictions for this batch\n # recons_loss = model(inputs_masked, mask_info=(x, y, mask_tgt))\n outputs, _, _ = model(inputs)\n\n # Get\n\n # Compute the loss and its gradients\n loss = outputs.log_softmax(dim=-1).gather(dim=-1, index=labels.unsqueeze(dim=-1)).reshape((outputs.shape[0],))\n loss = -loss.mean()\n recons_loss = 0.0 # recons_loss.mean()\n\n if mode == \"both\":\n (loss + 50.0 * recons_loss).backward()\n elif mode == \"finetune\":\n loss.backward()\n \n # Adjust learning weights\n optimizer.step()\n lr_scheduler.step()\n \n # Gather data and report\n running_loss += loss.item() * outputs.shape[0]\n # running_recons_loss += recons_loss.item() * outputs.shape[0]\n if i % 20 == 0:\n last_loss = running_loss / len_samples\n # last_recons_loss = running_recons_loss / len_samples\n # iterator.write(' batch {} loss: {} recons_loss: {} lr: {}'.format(i + 1, last_loss, last_recons_loss, get_lr(optimizer)))\n iterator.write(' batch {} loss: {} lr: {}'.format(i + 1, last_loss, get_lr(optimizer)))\n \n last_loss = running_loss / len_samples\n # last_recons_loss = running_recons_loss / len_samples\n # print(' batch {} loss: {} recons_loss: {} lr: {}'.format(i + 1, last_loss, last_recons_loss, get_lr(optimizer)))\n print(' batch {} loss: {} lr: {}'.format(i + 1, last_loss, get_lr(optimizer)))\n\n return last_loss, last_recons_loss\n\nEPOCHS = 1000\ntest_data = dataset.MiniPlaces(\n split='test',\n root_dir='./data' # NOTE: placeholder - GOOGLE_DRIVE_PATH was never defined in this script (Colab leftover)\n)\nbest_vloss = 0.00\n\nfor epoch in range(EPOCHS):\n print('EPOCH {}:'.format(epoch))\n \n # Make sure gradient tracking is on, and do a pass over the data\n model.train(True)\n avg_loss, avg_recons_loss = train_one_epoch(epoch, mode=\"finetune\")\n \n # We don't need gradients on 
to do reporting\n model.eval()\n \n running_vloss = 0.0\n running_recons_vloss = 0.\n total_vacc = 0.\n len_samples = 0\n # for params in optimizer.param_groups:\n # params['lr'] *= 0.9\n with torch.no_grad():\n for i, vdata in enumerate(validation_loader):\n inputs, labels, inputs_masked, x, y, mask_tgt = vdata\n inputs = inputs.cuda()\n labels = labels.cuda()\n inputs_masked = inputs_masked.cuda()\n x = x.cuda()\n y = y.cuda()\n mask_tgt = mask_tgt.cuda()\n\n len_samples += inputs.shape[0]\n # inputs = inputs.permute(0,3,1,2)\n # inputs_masked = inputs_masked.permute(0,3,1,2)\n outputs, _, _ = model(inputs)\n # recons_loss = model(inputs_masked, mask_info=(x, y, mask_tgt))\n\n loss = loss_fn(outputs, labels)\n\n outputs = torch.argmax(outputs, -1)\n acc = torch.sum(torch.eq(outputs, labels))\n total_vacc += acc.item()\n\n running_vloss += loss.item() * inputs.shape[0]\n # running_recons_vloss += recons_loss.mean().item() * inputs.shape[0]\n\n print ('validation acc is: %f'%(total_vacc/len_samples))\n if (total_vacc/len_samples) > best_vloss:\n best_vloss = total_vacc/len_samples\n # with open(\"test_UID.txt\", \"w\") as fout:\n # for i, vdata in tqdm(enumerate(test_data)):\n # vinputs, _, title = vdata\n # vinputs = vinputs.cuda()\n # vinputs = vinputs.unsqueeze(dim=0).permute(0,3,1,2)\n # voutputs, _, _ = model(vinputs)\n # print(\"%s %d\" % (title, voutputs.argmax(dim=-1).item()), file=fout)\n\n print ('best validation acc is: %f'%(best_vloss))\n\n avg_vloss = running_vloss / (len_samples)\n print('LOSS train {} train-recons {} valid {}'.format(avg_loss, avg_recons_loss, avg_vloss))\n # avg_recons_vloss = running_recons_vloss / (len_samples)\n # print('LOSS train {} train-recons {} valid {} valid-recons {}'.format(avg_loss, avg_recons_loss, avg_vloss, avg_recons_vloss))\n\n\n", "repo_name": "desire2020/WGAN-T", "sub_path": "classification_ablation_study.py", "file_name": "classification_ablation_study.py", "file_ext": "py", "file_size_in_byte": 8717, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 11, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 11, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "modeling_euct.EuclideanTransformerPreTrainedModel", "line_number": 30, "usage_type": "name"}, {"api_name": "transformers.XLNetConfig", "line_number": 31, "usage_type": "name"}, {"api_name": "modeling_euct.EuclideanTransformerModel", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "transformers.XLNetConfig.from_pretrained", "line_number": 54, "usage_type": "call"}, {"api_name": "transformers.XLNetConfig", "line_number": 54, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.random.manual_seed", 
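For reference, here is a self-contained sketch of the 8x8 patch masking that PatchMaskWrapperDataset performs per CIFAR-10 item; the tensor is random placeholder data, and the patch grid (4x4 patches of 8 pixels on a 32x32 image) mirrors the record above:

# Minimal illustration of the patch-masking idea; placeholder data only.
import random
import torch

img = torch.rand(3, 32, 32)                       # stand-in for one CIFAR-10 image
x, y = random.randint(0, 3), random.randint(0, 3) # pick one of the 4x4 patch slots
masked = img.clone()
mask_tgt = img[:, 8 * x: 8 * x + 8, 8 * y: 8 * y + 8].clone()  # reconstruction target
masked[:, 8 * x: 8 * x + 8, 8 * y: 8 * y + 8] = 0.0            # zero out the chosen patch
assert mask_tgt.shape == (3, 8, 8)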
"line_number": 57, "usage_type": "call"}, {"api_name": "torch.random", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "attribute"}, {"api_name": "transformers.optimization.AdamW", "line_number": 61, "usage_type": "call"}, {"api_name": "transformers.get_linear_schedule_with_warmup", "line_number": 64, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 87, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 92, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 109, "usage_type": "call"}, {"api_name": "dataset.MiniPlaces", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "28131011198", "text": "import json\nimport logging\nimport threading\nimport time\nfrom functools import lru_cache\nimport requests\nfrom collections import deque\nfrom multiprocessing.pool import ThreadPool\nfrom multiprocessing import Value\nfrom event_stream.event import Event\nfrom pubfinder_helper import PubFinderHelper\n\n\n@lru_cache(maxsize=10)\ndef fetch(doi):\n \"\"\"fetch response to add data to publication\n cache up to 100 since we should not have doi be occurring multiple times\n\n Arguments:\n doi: the doi to be fetched\n \"\"\"\n r = requests.get(SemanticScholarSource.base_url + requests.utils.quote(doi)) # check encoding\n if r.status_code == 200:\n json_response = r.json()\n if 'error' not in json_response:\n return json_response\n return None\n\n\ndef reset_api_limit(v, time_delta):\n logging.warning('reset semanticscholar api limit ' + str(v.value))\n with v.get_lock():\n v.value = 0\n api_limit_thread = threading.Timer(time_delta, reset_api_limit, args=[v, time_delta])\n api_limit_thread.daemon = True\n api_limit_thread.start()\n\n\nclass SemanticScholarSource(object):\n base_url = \"https://api.semanticscholar.org/v1/paper/\"\n\n tag = 'semanticscholar'\n log = 'SemanticScholar'\n work_queue = deque()\n work_pool = None\n running = True\n threads = 4\n api_limit = 95\n api_time = 300\n\n def __init__(self, result_deque):\n if not self.work_pool:\n self.work_pool = ThreadPool(self.threads, self.worker, ())\n self.result_deque = result_deque\n\n self.fetched_counter = Value('i', 0)\n api_limit_thread = threading.Timer(self.api_time, reset_api_limit, args=[self.fetched_counter, self.api_time])\n api_limit_thread.daemon = True\n api_limit_thread.start()\n self.api_reset_timestamp = int(time.time())\n\n def worker(self):\n \"\"\" main work function, fetch items and add data \"\"\"\n while self.running:\n try:\n item = self.work_queue.pop()\n except 
IndexError:\n time.sleep(0.1)\n pass\n else:\n if item:\n publication = PubFinderHelper.get_publication(item)\n logging.warning(self.log + \" work on item \" + publication['doi'])\n\n publication_temp = self.add_data_to_publication(publication)\n\n if publication_temp:\n publication = publication_temp\n\n if type(item) is Event:\n item.data['obj']['data'] = publication\n\n result = {'item': item, 'tag': self.tag}\n self.result_deque.append(result)\n\n def add_data_to_publication(self, publication):\n \"\"\" add data to a given publication using the doi to fetch a response and map the data \"\"\"\n response = self.api_limit_watcher(publication['doi'])\n return self.map(response, publication)\n\n def map_fields_of_study(self, fields):\n \"\"\" map fields of study \"\"\"\n result = []\n for field in fields:\n name = field\n normalized_name = PubFinderHelper.normalize(name)\n if not any(d['normalized_name'] == normalized_name for d in result):\n result.append({'name': name, 'normalized_name': normalized_name})\n return result\n\n def map(self, response_data, publication):\n \"\"\" map a JSON response to the internal data structure \"\"\"\n added_data = False\n if response_data:\n\n if PubFinderHelper.should_update('title', response_data, publication):\n publication['title'] = PubFinderHelper.clean_title(response_data['title'])\n publication['normalized_title'] = PubFinderHelper.normalize(publication['title'])\n added_data = True\n\n if PubFinderHelper.should_update('year', response_data, publication):\n publication['year'] = response_data['year']\n added_data = True\n\n if 'venue' in response_data and 'publisher' not in publication:\n publication['publisher'] = response_data['venue']\n added_data = True\n\n if 'numCitedBy' in response_data and (\n 'citation_count' not in publication or publication['citation_count'] == 0):\n publication['citation_count'] = response_data['numCitedBy']\n added_data = True\n\n if PubFinderHelper.should_update('authors', response_data, publication):\n publication['authors'] = self.map_author(response_data['authors'])\n added_data = True\n\n if 'abstract' in response_data and (\n 'abstract' not in publication\n or not PubFinderHelper.valid_abstract(publication['abstract'])):\n abstract = PubFinderHelper.clean_abstract(response_data['abstract'])\n if PubFinderHelper.valid_abstract(abstract):\n publication['abstract'] = abstract\n added_data = True\n\n if PubFinderHelper.should_update('fields_of_study', response_data, publication):\n publication['fields_of_study'] = self.map_fields_of_study(response_data['fields_of_study'])\n added_data = True\n\n if added_data:\n source_ids = publication['source_id']\n source_ids.append(\n {'title': 'SemanticScholar', 'url': 'https://www.semanticscholar.org?utm_source=api',\n 'license': 'TODO'})\n publication['source_id'] = source_ids\n\n return publication\n\n def api_limit_watcher(self, doi):\n \"\"\" ensure api limits are kept and if the limit is reached wait for reset \"\"\"\n if self.fetched_counter.value < self.api_limit:\n with self.fetched_counter.get_lock():\n self.fetched_counter.value += 1\n return fetch(doi)\n else:\n wt = self.api_time - (int(time.time()) - self.api_reset_timestamp) % self.api_time + 1\n logging.warning(self.log + ' api limit reached, wait ' + str(wt))\n time.sleep(wt)\n return self.api_limit_watcher(doi) # propagate the retried fetch result to the caller\n\n def map_author(self, authors):\n \"\"\" map authors \"\"\"\n result = []\n for author in authors:\n if 'name' in author:\n name = author['name']\n normalized_name = PubFinderHelper.normalize(name)\n 
result.append({\n 'name': name,\n 'normalized_name': normalized_name\n })\n else:\n logging.warning(self.log + ' no author name ' + json.dumps(author))\n return result\n", "repo_name": "ambalytics/amba-analysis-worker-pubfinder", "sub_path": "src/semanticscholar_source.py", "file_name": "semanticscholar_source.py", "file_ext": "py", "file_size_in_byte": 6704, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.utils.quote", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 22, "usage_type": "attribute"}, {"api_name": "functools.lru_cache", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 31, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 44, "usage_type": "call"}, {"api_name": "multiprocessing.pool.ThreadPool", "line_number": 53, "usage_type": "call"}, {"api_name": "multiprocessing.Value", "line_number": 56, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 57, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper.get_publication", "line_number": 72, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 72, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 73, "usage_type": "call"}, {"api_name": "event_stream.event.Event", "line_number": 80, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.normalize", "line_number": 96, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 96, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.should_update", "line_number": 106, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 106, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.clean_title", "line_number": 107, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 107, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.normalize", "line_number": 108, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 108, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.should_update", "line_number": 111, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 111, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.should_update", "line_number": 124, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 124, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.valid_abstract", "line_number": 130, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 130, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.clean_abstract", "line_number": 131, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 131, "usage_type": "name"}, {"api_name": "pubfinder_helper.PubFinderHelper.valid_abstract", "line_number": 132, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 132, "usage_type": "name"}, {"api_name": 
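The counter-plus-daemon-timer rate limiting in this record generalizes beyond Semantic Scholar. Below is a minimal, illustrative refactoring of the same pattern into one class; the class name is invented for the sketch, and the defaults mirror the record's budget of 95 calls per 300-second window:

# Hedged sketch of the window-reset rate-limit pattern; names are illustrative.
import threading
import time

class WindowLimiter:
    def __init__(self, limit=95, window=300):
        self.limit, self.window = limit, window
        self.count = 0
        self.lock = threading.Lock()
        self._schedule_reset()

    def _schedule_reset(self):
        # daemon timer so the limiter never blocks interpreter shutdown
        t = threading.Timer(self.window, self._reset)
        t.daemon = True
        t.start()

    def _reset(self):
        with self.lock:
            self.count = 0
        self._schedule_reset()

    def acquire(self):
        # block until a slot in the current window is available
        while True:
            with self.lock:
                if self.count < self.limit:
                    self.count += 1
                    return
            time.sleep(1)

Unlike the recursive api_limit_watcher above, a loop-based acquire() cannot grow the call stack while waiting out several windows.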
"pubfinder_helper.PubFinderHelper.should_update", "line_number": 136, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 136, "usage_type": "name"}, {"api_name": "time.time", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 157, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 158, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper.normalize", "line_number": 167, "usage_type": "call"}, {"api_name": "pubfinder_helper.PubFinderHelper", "line_number": 167, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 173, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "1257209139", "text": "import tkinter\nimport tkinter.messagebox\nimport cv2\nimport PIL.Image, PIL.ImageTk\nimport os\nimport numpy as np\nimport matcher\n\nfrom controller_image_transforms import *\nfrom controller_dnets import *\n\nNUMBER_OF_CONTOURS = 3\n\nR_MIN = 0.1\nR_MAX = 5.0\n\nMIN_ANGLE = 0.1\nMAX_ANGLE = 1.57\n#MAX_ANGLE = 2.1\n\nimage1_number = 3\nimage2_number = 4\n\nPATH_TO_IMAGE_3 = \"../resources/images_initial/small_image3.jpg\"\nPATH_TO_IMAGE_4 = \"../resources/images_initial/small_image4.jpg\"\n\nPATH_TO_IMAGE_1 = PATH_TO_IMAGE_3 # \"../resources/images_initial/img1.jpg\"\nPATH_TO_IMAGE_2 = PATH_TO_IMAGE_3 # \"../resources/images_initial/img2_2016-03-01 21.42.11.jpg\"\nPATH_TO_IMAGE_3 = PATH_TO_IMAGE_3 # \"../resources/images_initial/img3_20160630_160547.jpg\"\nPATH_TO_IMAGE_4 = PATH_TO_IMAGE_4\nPATH_TO_IMAGE_5 = PATH_TO_IMAGE_3 # \"../resources/images_initial/img5_croc82.jpg\"\n\nif image1_number == 1:\n path_to_image1 = PATH_TO_IMAGE_1\nelif image1_number == 2:\n path_to_image1 = PATH_TO_IMAGE_2\nelif image1_number == 3:\n path_to_image1 = PATH_TO_IMAGE_3\nelif image1_number == 4:\n path_to_image1 = PATH_TO_IMAGE_4\nelif image1_number == 5:\n path_to_image1 = PATH_TO_IMAGE_5\nelse:\n path_to_image1 = PATH_TO_IMAGE_1\n\nif image2_number == 1:\n path_to_image2 = PATH_TO_IMAGE_1\nelif image2_number == 2:\n path_to_image2 = PATH_TO_IMAGE_2\nelif image2_number == 3:\n path_to_image2 = PATH_TO_IMAGE_3\nelif image2_number == 4:\n path_to_image2 = PATH_TO_IMAGE_4\nelif image2_number == 5:\n path_to_image2 = PATH_TO_IMAGE_5\nelse:\n path_to_image2 = PATH_TO_IMAGE_1\n\n\ndef recreate_output_file():\n if os.path.exists(\"../resources/csv/contours3_from_gui.csv\"):\n os.remove(\"../resources/csv/contours3_from_gui.csv\")\n\n\ndef is_unique_contour(test_cnts_list, contour):\n result = True\n for cnt in test_cnts_list:\n if cnt == contour:\n result = False\n break\n\n return result\n\n\ndef about_method():\n global cv_img1\n global cv_img2\n global cv_img3\n\n global gt_image1\n global gt_image2\n\n global photo1\n global photo2\n global photo3\n\n global test_cnts_list\n\n if len(test_cnts_list) == NUMBER_OF_CONTOURS:\n\n tkinter.messagebox.showinfo(title=\"Welcome\",\n message=\"Run App!\\n\" + \"contours number=\" + str(len(test_cnts_list)))\n\n test_cnts_list\n print(\"about_method\")\n else:\n tkinter.messagebox.showinfo(title=\"Welcome\",\n message=\"Choose 3 contours!\\n\" + \"contours number=\" + str(len(test_cnts_list)))\n\n\ndef exit_method():\n print(\"exit_method\")\n exit()\n\n\ndef GT_image1_method():\n global photo1\n\n global cv_img1\n\n global image1_number\n\n global path_to_image1\n\n global test_cnts_list\n\n cv_img1 = get_GT_image(image1_number)\n cv_img1 = cv2.cvtColor(cv_img1, cv2.COLOR_BGR2RGB)\n\n photo1 = 
PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n test_cnts_list = []\n\n print(\"GT_image1_method\")\n\n\ndef GT_image2_method():\n global photo2\n\n global cv_img2\n\n global image2_number\n\n global path_to_image2\n\n global test_cnts_list\n\n cv_img2 = get_GT_image(image2_number)\n cv_img2 = cv2.cvtColor(cv_img2, cv2.COLOR_BGR2RGB)\n\n photo2 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img2))\n canvas2.create_image(0, 0, image=photo2, anchor=tkinter.NW)\n\n print(\"GT_image2_method\")\n\n\ndef find_contours_of_image_1():\n global cv_img1\n global cv_img2\n global cv_img3\n\n global gt_image1\n global gt_image2\n\n global photo1\n global photo2\n global photo3\n\n global test_cnts_list\n\n image = get_GT_image(image1_number)\n\n img_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n ret, thresh = cv2.threshold(img_gray, 20, 255, 0)\n\n kernel = np.ones((2, 2), np.uint8)\n\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n\n contours1, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n new_contours1 = []\n for cnt in contours1:\n area = cv2.contourArea(cnt)\n if area < 50000:\n new_contours1.append(cnt)\n\n cv_img1 = image\n for cnt in new_contours1:\n cv_img1 = cv2.drawContours(cv_img1, [cnt], 0, [0, 255, 0], -1)\n\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n test_cnts_list = []\n\n return new_contours1\n\n\ndef find_contours_of_image_2():\n global cv_img1\n global cv_img2\n global cv_img3\n\n global gt_image1\n global gt_image2\n\n global photo1\n global photo2\n global photo3\n\n image = get_GT_image(image2_number)\n\n img_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n ret, thresh = cv2.threshold(img_gray, 20, 255, 0)\n\n kernel = np.ones((2, 2), np.uint8)\n\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n\n contours2, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # path_to_file_csv_from_gui = '../resources/csv/mycsv4_all_50.csv'\n path_to_file_csv_from_gui = '../resources/csv/mycsv4_all.csv'\n contours2 = matcher.read_contours(path_to_file_csv_from_gui)\n\n new_contours2 = []\n for cnt in contours2:\n area = cv2.contourArea(cnt)\n if area < 50000:\n new_contours2.append(cnt)\n\n cv_img2 = image\n for cnt in new_contours2:\n cv_img2 = cv2.drawContours(cv_img2, [cnt], 0, [0, 255, 0], -1)\n\n photo2 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img2))\n canvas2.create_image(0, 0, image=photo2, anchor=tkinter.NW)\n\n return new_contours2\n\n\ndef reset_image1_method():\n global photo1\n\n global cv_img1\n\n global image1_number\n\n global path_to_image1\n\n global test_cnts_list\n\n recreate_output_file()\n\n cv_img1 = cv2.cvtColor(cv2.imread(path_to_image1), cv2.COLOR_BGR2RGB)\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n test_cnts_list = []\n\n print(\"reset_image1_method\")\n\n\ndef reset_image2_method():\n global photo2\n\n global cv_img2\n\n global image2_number\n\n global path_to_image2\n\n global test_cnts_list\n\n cv_img2 = cv2.cvtColor(cv2.imread(path_to_image2), cv2.COLOR_BGR2RGB)\n photo2 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img2))\n canvas2.create_image(0, 0, image=photo2, anchor=tkinter.NW)\n\n test_cnts_list = []\n\n print(\"reset_image2_method\")\n\n\ndef left_mouse_button_pressed_method(event):\n global 
cv_img1\n global cv_img2\n global cv_img3\n\n global gt_image1\n global gt_image2\n\n global photo1\n global photo2\n global photo3\n\n global c1\n\n global contours1_list\n global contours2_list\n\n global test_cnts_list\n global list_of_available_cnts_ids_all\n\n cv2.circle(cv_img1, (event.x, event.y), 3, (255, 0, 0), -1)\n\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n # get GT of images with numbers=image1_number and image2_number\n gt_image1 = get_GT_image(image1_number)\n gt_image2 = get_GT_image(image2_number)\n\n # get GT contours\n contours1_list = generate_contours_list_from_GROUND_TRUTH_file(gt_image1)\n # print('len(contours1_list)=', len(contours1_list))\n\n for i1 in range(len(contours1_list)):\n dist = cv2.pointPolygonTest(contours1_list[i1], (event.x, event.y), True)\n if dist > 0: # Positive value if the point is inside the contour\n if len(test_cnts_list) < NUMBER_OF_CONTOURS and is_unique_contour(test_cnts_list,\n contours1_list[i1]) == True:\n test_cnts_list.append(contours1_list[i1])\n print('len(test_cnts_list)=', len(test_cnts_list))\n\n if len(test_cnts_list) == 1:\n cv_img1 = get_GT_image(image1_number)\n cv_img1 = cv2.cvtColor(cv_img1, cv2.COLOR_BGR2RGB)\n\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n print('1 cnt in test_cnts_list')\n cv_img1 = cv2.drawContours(cv_img1, [test_cnts_list[0]], 0, [0, 255, 0], -1)\n\n list_of_available_cnts_ids_case1 = c1[i1].vn\n list_of_available_cnts_ids_all = list_of_available_cnts_ids_case1\n\n f = open('cnts1', 'w')\n for cnt1_id in list_of_available_cnts_ids_case1:\n f.write(str(cnt1_id) + ', ')\n f.close()\n\n\n elif len(test_cnts_list) == 2:\n cv_img1 = get_GT_image(image1_number)\n cv_img1 = cv2.cvtColor(cv_img1, cv2.COLOR_BGR2RGB)\n\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n print('2 cnt in test_cnts_list')\n cv_img1 = cv2.drawContours(cv_img1, [test_cnts_list[0]], 0, [0, 255, 0], -1)\n cv_img1 = cv2.drawContours(cv_img1, [test_cnts_list[1]], 0, [0, 255, 0], -1)\n\n file1 = open('cnts1', 'r')\n text1 = file1.read()\n list_of_available_cnts_ids_case1 = text1.split(', ')\n\n list_of_available_cnts_ids_case2 = c1[i1].vn\n\n\n\n list_of_available_cnts_ids_all = []\n new_list_of_available_cnts_ids_all = []\n for cnt111 in list_of_available_cnts_ids_case1:\n for cnt222 in list_of_available_cnts_ids_case2:\n if len(cnt111) > 0:\n if int(cnt111) == int(cnt222):\n list_of_available_cnts_ids_all.append(int(cnt111))\n # for id3 in list_of_available_cnts_ids_all:\n # flag=True\n # for cnt111 in list_of_available_cnts_ids_case1:\n # for cnt222 in list_of_available_cnts_ids_case2:\n # if len(cnt111) > 0:\n # if int(cnt111) == int(cnt222):\n # temp = matcher.init_triplet(c1, int(cnt111), int(cnt222), int(id3))\n # # validate triplet\n # if matcher.is_good_triplet(temp, MIN_ANGLE, MAX_ANGLE)==False:\n # flag=False\n # if flag==True:\n # new_list_of_available_cnts_ids_all.append(int(cnt111))\n # list_of_available_cnts_ids_all=new_list_of_available_cnts_ids_all\n f = open('cnts2', 'w')\n for cnt2_id in list_of_available_cnts_ids_all:\n f.write(str(cnt2_id) + ', ')\n f.close()\n print('list_of_available_cnts_ids_case1=', list_of_available_cnts_ids_case1)\n print('list_of_available_cnts_ids_case2=', list_of_available_cnts_ids_case2)\n print('list_of_available_cnts_ids_all=', 
list_of_available_cnts_ids_all)\n\n elif len(test_cnts_list) == 3:\n\n cv_img1 = get_GT_image(image1_number)\n cv_img1 = cv2.cvtColor(cv_img1, cv2.COLOR_BGR2RGB)\n\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n print('3 cnt in test_cnts_list')\n cv_img1 = cv2.drawContours(cv_img1, [test_cnts_list[0]], 0, [0, 255, 0], -1)\n cv_img1 = cv2.drawContours(cv_img1, [test_cnts_list[1]], 0, [0, 255, 0], -1)\n cv_img1 = cv2.drawContours(cv_img1, [test_cnts_list[2]], 0, [0, 255, 0], -1)\n\n file1 = open('cnts1', 'r')\n text1 = file1.read()\n list_of_available_cnts_ids_case1 = text1.split(', ')\n\n file2 = open('cnts2', 'r')\n text2 = file2.read()\n list_of_available_cnts_ids_case2 = text2.split(', ')\n\n list_of_available_cnts_ids_case3 = c1[i1].vn\n\n list_of_available_cnts_ids_all = []\n for cnt111 in list_of_available_cnts_ids_case1:\n for cnt222 in list_of_available_cnts_ids_case2:\n for cnt333 in list_of_available_cnts_ids_case3:\n if len(cnt111) > 0 and len(cnt222) > 0:\n if int(cnt111) == int(cnt222) and int(cnt111) == int(cnt333):\n list_of_available_cnts_ids_all.append(int(cnt111))\n print('list_of_available_cnts_ids_case1=', list_of_available_cnts_ids_case1)\n print('list_of_available_cnts_ids_case2=', list_of_available_cnts_ids_case2)\n print('list_of_available_cnts_ids_case3=', list_of_available_cnts_ids_case3)\n print('list_of_available_cnts_ids_all=', list_of_available_cnts_ids_all)\n\n for num in list_of_available_cnts_ids_all:\n cv_img1 = cv2.drawContours(cv_img1, [contours1_list[num]], 0, [255, 255, 0], 2)\n\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n if len(test_cnts_list) == NUMBER_OF_CONTOURS:\n tkinter.messagebox.showinfo(title=\"Triplet generation\",\n message=\"Well done! \" + str(NUMBER_OF_CONTOURS) + \" contours chosen!\")\n make_csv_from_cnts_list(path_to_save_csv_file='../resources/csv/contours3_from_gui.csv',\n cnts_list=test_cnts_list)\n try:\n result_image3, img1_with_cnts, img2_with_cnts = d_nets_method(image1_number, image2_number)\n except Exception as e:\n print(e)\n tkinter.messagebox.showinfo(title=\"Error!\",message=\"Bad contours. 
Try again\")\n exit_method()\n cv_img1 = img1_with_cnts\n cv_img1 = cv2.cvtColor(cv_img1, cv2.COLOR_BGR2RGB)\n\n cv_img2 = img2_with_cnts\n cv_img2 = cv2.cvtColor(cv_img2, cv2.COLOR_BGR2RGB)\n\n cv_img3 = result_image3\n cv_img3 = cv2.cvtColor(cv_img3, cv2.COLOR_BGR2RGB)\n\n dim = (width * 2, height)\n # resize image\n cv_img3 = cv2.resize(cv_img3, dim, interpolation=cv2.INTER_AREA)\n\n photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\n canvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\n\n photo2 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img2))\n canvas2.create_image(0, 0, image=photo2, anchor=tkinter.NW)\n\n photo3 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img3))\n canvas3.create_image(0, 0, image=photo3, anchor=tkinter.NW)\n\n elif len(test_cnts_list) == NUMBER_OF_CONTOURS:\n tkinter.messagebox.showinfo(title=\"Triplet generation\", message=\"Contours have been already chosen!\")\n\n\n# Create a window\nwindow = tkinter.Tk()\nwindow.title(\"OpenCV and Tkinter\")\nwindow.geometry(\"1400x700\")\nrecreate_output_file()\n\ntest_cnts_list = []\nlist_of_available_cnts_ids_all = []\n\npath_to_file_csv_from_gui = '../resources/csv/mycsv3_all.csv'\ncontours1_forcolorhelp = matcher.read_contours(path_to_file_csv_from_gui)\nc1 = matcher.get_contours_data(contours1_forcolorhelp)\nfor i in range(len(c1)):\n # check vertice status\n if c1[i].status == False:\n continue\n # find_triplet_neighbours(c, i, 15.0, 20.0)\n matcher.find_triplet_neighbours(c1, i, R_MIN, R_MAX)\n # print('c1[i].vn=',c1[i].vn)\n\nlabel_title = tkinter.Label(master=window,\n width=20,\n text=\"Crocodiles GUI Util\",\n font=(\"arial\", 19, \"bold\")\n ).place(x=90, y=5)\n\n# Load an image using OpenCV\ncv_img1 = cv2.cvtColor(cv2.imread(path_to_image1), cv2.COLOR_BGR2RGB)\ncv_img2 = cv2.cvtColor(cv2.imread(path_to_image2), cv2.COLOR_BGR2RGB)\n\n# Get the image dimensions (OpenCV stores image data as NumPy ndarray)\nheight, width, channels = cv_img1.shape\n\n# Create a canvas that can fit the above image\ncanvas1 = tkinter.Canvas(master=window, width=width, height=height)\ncanvas2 = tkinter.Canvas(master=window, width=width, height=height)\ncanvas3 = tkinter.Canvas(master=window, width=width * 2, height=height)\n\ncanvas1.place(x=10, y=10)\ncanvas2.place(x=700, y=10)\ncanvas3.place(x=250, y=400)\n\n# bind mouse events to canvas\ncanvas1.bind(sequence=\"\", func=left_mouse_button_pressed_method)\n\n# Use PIL (Pillow) to convert the NumPy ndarray to a PhotoImage\nphoto1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img1))\nphoto2 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img2))\n\n# Add a PhotoImage to the Canvas\ncanvas1.create_image(0, 0, image=photo1, anchor=tkinter.NW)\ncanvas2.create_image(0, 0, image=photo2, anchor=tkinter.NW)\n\n# contours1_list = generate_contours_list_from_GROUND_TRUTH_file(cv_img1)\n# contours2_list = generate_contours_list_from_GROUND_TRUTH_file(cv_img2)\n\n# Button Run\nbtn_run = tkinter.Button(window, text=\"Run\", width=15, command=about_method)\nbtn_run.place(x=20, y=500)\n\n# Button Exit\nbtn_exit = tkinter.Button(window, text=\"Exit\", width=15, command=exit_method)\nbtn_exit.place(x=20, y=550)\n\n# Button GT image\nbtn_GT1 = tkinter.Button(window, text=\"GT\", width=15, command=GT_image1_method)\nbtn_GT1.place(x=20, y=330)\n\nbtn_GT2 = tkinter.Button(window, text=\"GT\", width=15, command=GT_image2_method)\nbtn_GT2.place(x=720, y=330)\n\n# Button Reset\nbtn_reset1 = tkinter.Button(window, text=\"Reset\", width=15, 
command=reset_image1_method)\nbtn_reset1.place(x=170, y=330)\n\nbtn_reset2 = tkinter.Button(window, text=\"Reset\", width=15, command=reset_image2_method)\nbtn_reset2.place(x=870, y=330)\n\n# Button Contours\nbtn_cnts1 = tkinter.Button(window, text=\"cnts\", width=15, command=find_contours_of_image_1)\nbtn_cnts1.place(x=320, y=330)\n\nbtn_cnts2 = tkinter.Button(window, text=\"cnts\", width=15, command=find_contours_of_image_2)\nbtn_cnts2.place(x=1020, y=330)\n\n# Create menu\nmenu = tkinter.Menu(master=window)\nwindow.config(menu=menu)\n\nsubmenu_1 = tkinter.Menu(master=menu)\nmenu.add_cascade(label=\"File\", menu=submenu_1)\nsubmenu_1.add_command(label=\"Exit\", command=exit_method)\n\nsubmenu_2 = tkinter.Menu(master=menu)\nmenu.add_cascade(label=\"Options\", menu=submenu_2)\nsubmenu_2.add_command(label=\"About\", command=about_method)\n\n# Run the window loop\nwindow.mainloop()\n", "repo_name": "DenisLaptev/croco_GUI_7", "sub_path": "app/src/gui_dnets.py", "file_name": "gui_dnets.py", "file_ext": "py", "file_size_in_byte": 19212, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 97, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 97, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 118, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 120, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 120, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 120, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 120, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 121, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 140, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 142, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 142, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 142, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 142, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 143, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 164, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 164, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 167, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 169, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 171, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 171, 
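Boiled down, this GUI record repeats one display idiom throughout. Here is a minimal standalone sketch of it (the image path is a placeholder); note that a reference to the PhotoImage must be kept alive, which is why the original stores photo1/photo2/photo3 in module-level globals:

# Stripped-down sketch of the OpenCV-to-Tkinter display pattern; path is a placeholder.
import tkinter
import cv2
import PIL.Image, PIL.ImageTk

window = tkinter.Tk()
# OpenCV loads BGR; Tk/PIL expect RGB, hence the conversion
img = cv2.cvtColor(cv2.imread("some_image.jpg"), cv2.COLOR_BGR2RGB)
h, w = img.shape[:2]
canvas = tkinter.Canvas(window, width=w, height=h)
canvas.pack()
# keep this reference, or Tk garbage-collects the image and the canvas goes blank
photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(img))
canvas.create_image(0, 0, image=photo, anchor=tkinter.NW)
window.mainloop()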
"usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 171, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 175, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 181, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 183, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 183, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 183, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 183, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 183, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 184, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 205, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 205, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 208, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 210, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 212, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 212, "usage_type": "attribute"}, {"api_name": "matcher.read_contours", "line_number": 216, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 220, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 226, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 228, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 228, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 228, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 228, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 228, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 229, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 247, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 247, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 247, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 248, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 248, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 248, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 248, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 248, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 249, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 267, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 267, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 267, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 268, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 268, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 268, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 268, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 268, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 269, "usage_type": "attribute"}, 
{"api_name": "cv2.circle", "line_number": 296, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 298, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 298, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 298, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 298, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 298, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 299, "usage_type": "attribute"}, {"api_name": "cv2.pointPolygonTest", "line_number": 310, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 319, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 319, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 321, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 321, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 321, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 321, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 321, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 322, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 325, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 338, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 338, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 340, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 340, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 340, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 340, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 340, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 341, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 344, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 345, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 386, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 386, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 388, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 388, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 388, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 388, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 388, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 389, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 392, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 393, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 394, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 419, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 421, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 421, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 421, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 421, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 421, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 422, "usage_type": "attribute"}, {"api_name": 
"tkinter.messagebox.showinfo", "line_number": 425, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 425, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 433, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 433, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 436, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 436, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 439, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 439, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 442, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 442, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 446, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 446, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 448, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 448, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 448, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 448, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 448, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 449, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 451, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 451, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 451, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 451, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 451, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 452, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 454, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 454, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 454, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 454, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 454, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 455, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 458, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 458, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 462, "usage_type": "call"}, {"api_name": "matcher.read_contours", "line_number": 471, "usage_type": "call"}, {"api_name": "matcher.get_contours_data", "line_number": 472, "usage_type": "call"}, {"api_name": "matcher.find_triplet_neighbours", "line_number": 478, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 481, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 488, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 488, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 488, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 489, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 489, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 489, "usage_type": "attribute"}, {"api_name": "tkinter.Canvas", "line_number": 495, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 496, "usage_type": "call"}, {"api_name": 
"tkinter.Canvas", "line_number": 497, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 507, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 507, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 507, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 507, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 507, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 508, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 508, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 508, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 508, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 508, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 511, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 512, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 518, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 522, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 526, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 529, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 533, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 536, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 540, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 543, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 547, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 550, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 554, "usage_type": "call"}]} +{"seq_id": "73628911040", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#!/usr/bin/env python\n# coding: utf-8\n\nimport argparse\nimport json\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nimport os\nimport pickle\nfrom enum import Enum\n\nfrom transformers import AutoTokenizer\nimport torch\nfrom torch.utils.data import TensorDataset\n\nMULTI_SEP_TOKENS_TOKENIZERS_SET = {\"roberta\", \"camembert\", \"bart\"}\n\ndef _is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start : (new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\n\ndef _new_check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n # if len(doc_spans) == 1:\n # return True\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span[\"start\"] + doc_span[\"length\"] - 1\n if position < doc_span[\"start\"]:\n continue\n if position > 
end:\n            continue\n        num_left_context = position - doc_span[\"start\"]\n        num_right_context = end - position\n        score = min(num_left_context, num_right_context) + 0.01 * doc_span[\"length\"]\n        if best_score is None or score > best_score:\n            best_score = score\n            best_span_index = span_index\n\n    return cur_span_index == best_span_index\n\nclass ExplicitEnum(Enum):\n    \"\"\"\n    Enum with more explicit error message for missing values.\n    \"\"\"\n    @classmethod\n    def _missing_(cls, value):\n        raise ValueError(\n            \"%r is not a valid %s, please select one of %s\"\n            % (value, cls.__name__, str(list(cls._value2member_map_.keys())))\n        )\n\n\nclass TruncationStrategy(ExplicitEnum):\n    \"\"\"\n    Possible values for the ``truncation`` argument in :meth:`PreTrainedTokenizerBase.__call__`.\n    Useful for tab-completion in an IDE.\n    \"\"\"\n\n    ONLY_FIRST = \"only_first\"\n    ONLY_SECOND = \"only_second\"\n    LONGEST_FIRST = \"longest_first\"\n    DO_NOT_TRUNCATE = \"do_not_truncate\"\n\nclass SquadExample:\n    \"\"\"\n    A single training/test example for the Squad dataset, as loaded from disk.\n    Args:\n        qas_id: The example's unique identifier\n        question_text: The question string\n        context_text: The context string\n        answer_text: The answer string\n        start_position_character: The character position of the start of the answer\n        title: The title of the example\n        answers: None by default, this is used during evaluation. Holds answers as well as their start positions.\n        is_impossible: False by default, set to True if the example has no possible answer.\n    \"\"\"\n\n    def __init__(\n        self,\n        qas_id,\n        question_text,\n        context_text,\n        answer_text,\n        start_position_character,\n        title,\n        answers=None,\n        is_impossible=False,\n    ):\n        self.qas_id = qas_id\n        self.question_text = question_text\n        self.context_text = context_text\n        self.answer_text = answer_text\n        self.title = title\n        self.is_impossible = is_impossible\n        # Avoid a mutable default argument; the docstring already promises None by default.\n        self.answers = answers if answers is not None else []\n\n        self.start_position, self.end_position = 0, 0\n\n        doc_tokens = []\n        char_to_word_offset = []\n        prev_is_whitespace = True\n\n        # Split on whitespace so that different tokens may be attributed to their original position.\n        for c in self.context_text:\n            if _is_whitespace(c):\n                prev_is_whitespace = True\n            else:\n                if prev_is_whitespace:\n                    doc_tokens.append(c)\n                else:\n                    doc_tokens[-1] += c\n                prev_is_whitespace = False\n            char_to_word_offset.append(len(doc_tokens) - 1)\n\n        self.doc_tokens = doc_tokens\n        self.char_to_word_offset = char_to_word_offset\n\n        # Start and end positions only have a value during evaluation. end_position is the real end position of the answer (not the list index)\n        if start_position_character is not None and not is_impossible:\n            self.start_position = char_to_word_offset[start_position_character]\n            self.end_position = char_to_word_offset[\n                min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)\n            ]\n\n\nclass SquadFeatures:\n    \"\"\"\n    Single squad example features to be fed to a model.\n    Those features are model-specific and can be crafted from :class:`~transformers.data.processors.squad.SquadExample`\n    using the :method:`~transformers.data.processors.squad.squad_convert_examples_to_features` method.\n    Args:\n        input_ids: Indices of input sequence tokens in the vocabulary.\n        attention_mask: Mask to avoid performing attention on padding token indices.\n        token_type_ids: Segment token indices to indicate first and second portions of the inputs.\n        cls_index: the index of the CLS token.\n        p_mask: Mask identifying tokens that can be answers vs. 
tokens that cannot.\n            Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer\n        example_index: the index of the example\n        unique_id: The unique Feature identifier\n        paragraph_len: The length of the context\n        token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object.\n            If a token does not have their maximum context in this feature object, it means that another feature object\n            has more information related to that token and should be prioritized over this feature for that token.\n        tokens: list of tokens corresponding to the input ids\n        token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.\n        start_position: start of the answer token index\n        end_position: end of the answer token index\n    \"\"\"\n\n    def __init__(\n        self,\n        input_ids,\n        attention_mask,\n        token_type_ids,\n        cls_index,\n        p_mask,\n        example_index,\n        unique_id,\n        paragraph_len,\n        token_is_max_context,\n        tokens,\n        token_to_orig_map,\n        start_position,\n        end_position,\n        is_impossible,\n        qas_id: str = None,\n    ):\n        self.input_ids = input_ids\n        self.attention_mask = attention_mask\n        self.token_type_ids = token_type_ids\n        self.cls_index = cls_index\n        self.p_mask = p_mask\n\n        self.example_index = example_index\n        self.unique_id = unique_id\n        self.paragraph_len = paragraph_len\n        self.token_is_max_context = token_is_max_context\n        self.tokens = tokens\n        self.token_to_orig_map = token_to_orig_map\n\n        self.start_position = start_position\n        self.end_position = end_position\n        self.is_impossible = is_impossible\n        self.qas_id = qas_id\n\n\n\ndef create_examples(input_json, set_type):\n    with open(input_json, \"r\", encoding=\"utf-8\") as reader:\n        input_data = json.load(reader)[\"data\"]\n    is_training = set_type == \"train\"\n    examples = []\n    for entry in tqdm(input_data):\n        title = entry[\"title\"]\n        for paragraph in entry[\"paragraphs\"]:\n            context_text = paragraph[\"context\"]\n            for qa in paragraph[\"qas\"]:\n                qas_id = qa[\"id\"]\n                question_text = qa[\"question\"]\n                start_position_character = None\n                answer_text = None\n                answers = []\n\n                is_impossible = qa.get(\"is_impossible\", False) # for squad v2.0\n                #print(\"is_impossible\",is_impossible)\n                if not is_impossible:\n                    if is_training:\n                        answer = qa[\"answers\"][0] # only one answer per question in train dataset\n                        answer_text = answer[\"text\"]\n                        start_position_character = answer[\"answer_start\"]\n                    else:\n                        answers = qa[\"answers\"] #There are multiple answers per question in Dev dataset\n                example = SquadExample(\n                    qas_id=qas_id,\n                    question_text=question_text,\n                    context_text=context_text,\n                    answer_text=answer_text,\n                    start_position_character=start_position_character,\n                    title=title,\n                    is_impossible=is_impossible,\n                    answers=answers,\n                )\n                examples.append(example)\n    return examples\n\n    \ndef squad_convert_example_to_features(\n    example, max_seq_length, doc_stride, max_query_length, tokenizer, is_training):\n    features = []\n    if is_training and not example.is_impossible:\n        # Get start and end position\n        start_position = example.start_position\n        end_position = example.end_position\n\n        # If the answer cannot be found in the text, then skip this example.\n        actual_text = \" \".join(example.doc_tokens[start_position : (end_position + 1)])\n        cleaned_answer_text = \" \".join(whitespace_tokenize(example.answer_text))\n        if actual_text.find(cleaned_answer_text) == -1:\n            # Use the logging module directly; no module-level logger is ever defined,\n            # so the original `logger.warning` call raised a NameError.\n            logging.warning(\"Could not find answer: '%s' vs. 
'%s'\", actual_text, cleaned_answer_text)\n return []\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text\n )\n\n spans = []\n\n truncated_query = tokenizer.encode(\n example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length\n )\n\n # Tokenizers who insert 2 SEP tokens in-between & need to have special handling\n # in the way they compute mask of added tokens.\n tokenizer_type = type(tokenizer).__name__.replace(\"Tokenizer\", \"\").lower()\n sequence_added_tokens = (\n tokenizer.max_len - tokenizer.max_len_single_sentence + 1\n if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET\n else tokenizer.max_len - tokenizer.max_len_single_sentence\n )\n sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair\n\n span_doc_tokens = all_doc_tokens\n while len(spans) * doc_stride < len(all_doc_tokens):\n\n # Define the side we want to truncate / pad and the text/pair sorting\n if tokenizer.padding_side == \"right\":\n texts = truncated_query\n pairs = span_doc_tokens\n truncation = TruncationStrategy.ONLY_SECOND.value\n else:\n texts = span_doc_tokens\n pairs = truncated_query\n truncation = TruncationStrategy.ONLY_FIRST.value\n\n encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic\n texts,\n pairs,\n truncation=truncation,\n padding=\"max_length\",\n max_length=max_seq_length,\n return_overflowing_tokens=True,\n stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,\n return_token_type_ids=True,\n )\n\n paragraph_len = min(\n len(all_doc_tokens) - len(spans) * doc_stride,\n max_seq_length - len(truncated_query) - sequence_pair_added_tokens,\n )\n\n if tokenizer.pad_token_id in encoded_dict[\"input_ids\"]:\n if tokenizer.padding_side == \"right\":\n non_padded_ids = encoded_dict[\"input_ids\"][: encoded_dict[\"input_ids\"].index(tokenizer.pad_token_id)]\n else:\n last_padding_id_position = (\n len(encoded_dict[\"input_ids\"]) - 1 - encoded_dict[\"input_ids\"][::-1].index(tokenizer.pad_token_id)\n )\n non_padded_ids = encoded_dict[\"input_ids\"][last_padding_id_position + 1 :]\n\n else:\n non_padded_ids = encoded_dict[\"input_ids\"]\n\n tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)\n\n token_to_orig_map = {}\n for i in range(paragraph_len):\n index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == \"right\" else i\n token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]\n\n encoded_dict[\"paragraph_len\"] = paragraph_len\n encoded_dict[\"tokens\"] = tokens\n encoded_dict[\"token_to_orig_map\"] = token_to_orig_map\n encoded_dict[\"truncated_query_with_special_tokens_length\"] = len(truncated_query) + sequence_added_tokens\n encoded_dict[\"token_is_max_context\"] = {}\n encoded_dict[\"start\"] = len(spans) * doc_stride\n 
encoded_dict[\"length\"] = paragraph_len\n\n spans.append(encoded_dict)\n\n if \"overflowing_tokens\" not in encoded_dict or (\n \"overflowing_tokens\" in encoded_dict and len(encoded_dict[\"overflowing_tokens\"]) == 0\n ):\n break\n span_doc_tokens = encoded_dict[\"overflowing_tokens\"]\n\n for doc_span_index in range(len(spans)):\n for j in range(spans[doc_span_index][\"paragraph_len\"]):\n is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)\n index = (\n j\n if tokenizer.padding_side == \"left\"\n else spans[doc_span_index][\"truncated_query_with_special_tokens_length\"] + j\n )\n spans[doc_span_index][\"token_is_max_context\"][index] = is_max_context\n\n for span in spans:\n # Identify the position of the CLS token\n cls_index = span[\"input_ids\"].index(tokenizer.cls_token_id)\n\n # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)\n # Original TF implem also keep the classification token (set to 0)\n p_mask = np.ones_like(span[\"token_type_ids\"])\n if tokenizer.padding_side == \"right\":\n p_mask[len(truncated_query) + sequence_added_tokens :] = 0\n else:\n p_mask[-len(span[\"tokens\"]) : -(len(truncated_query) + sequence_added_tokens)] = 0\n\n pad_token_indices = np.where(span[\"input_ids\"] == tokenizer.pad_token_id)\n special_token_indices = np.asarray(\n tokenizer.get_special_tokens_mask(span[\"input_ids\"], already_has_special_tokens=True)\n ).nonzero()\n\n p_mask[pad_token_indices] = 1\n p_mask[special_token_indices] = 1\n\n # Set the cls index to 0: the CLS index can be used for impossible answers\n p_mask[cls_index] = 0\n\n span_is_impossible = example.is_impossible\n start_position = 0\n end_position = 0\n if is_training and not span_is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = span[\"start\"]\n doc_end = span[\"start\"] + span[\"length\"] - 1\n out_of_span = False\n\n if not (tok_start_position >= doc_start and tok_end_position <= doc_end):\n out_of_span = True\n\n if out_of_span:\n start_position = cls_index\n end_position = cls_index\n span_is_impossible = True\n else:\n if tokenizer.padding_side == \"left\":\n doc_offset = 0\n else:\n doc_offset = len(truncated_query) + sequence_added_tokens\n\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n features.append(\n SquadFeatures(\n span[\"input_ids\"],\n span[\"attention_mask\"],\n span[\"token_type_ids\"],\n cls_index,\n p_mask.tolist(),\n example_index=0, # Can not set unique_id and example_index here. 
They will be set after multiple processing.\n                unique_id=0, # the unique feature id\n                paragraph_len=span[\"paragraph_len\"],\n                token_is_max_context=span[\"token_is_max_context\"],\n                tokens=span[\"tokens\"],\n                token_to_orig_map=span[\"token_to_orig_map\"], # the token position in input_ids--->original context position (before tokenization) \n                start_position=start_position,# the start position in input_ids\n                end_position=end_position,# the end position in input_ids\n                is_impossible=span_is_impossible,\n                qas_id=example.qas_id, # the unique question-answer id from original json file\n            )\n        )\n    return features\n\ndef squad_convert_examples_to_features(\n    examples,\n    tokenizer,\n    max_seq_length,\n    doc_stride,\n    max_query_length,\n    is_training,\n    padding_strategy=\"max_length\",\n    tqdm_enabled=True,\n):\n    \"\"\"\n    Converts a list of examples into a list of features that can be directly given as input to a model.\n    It is model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.\n    Args:\n        examples: list of :class:`~transformers.data.processors.squad.SquadExample`\n        tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`\n        max_seq_length: The maximum sequence length of the inputs.\n        doc_stride: The stride used when the context is too large and is split across several features.\n        max_query_length: The maximum length of the query.\n        is_training: whether to create features for model evaluation or model training.\n        padding_strategy: Default to \"max_length\". Which padding strategy to use\n\n    Returns:\n        list of :class:`~transformers.data.processors.squad.SquadFeatures`\n\n    \"\"\"\n\n    # Defining helper methods\n    features = []\n    for example in tqdm(examples,total=len(examples),desc=\"convert example to features\", disable=not tqdm_enabled): \n        features_from_one_example=squad_convert_example_to_features(example, \n                                                                    max_seq_length, \n                                                                    doc_stride, \n                                                                    max_query_length, \n                                                                    tokenizer, \n                                                                    is_training)\n\n        features.append(features_from_one_example)\n    new_features = []\n    unique_id = 1000000000\n    example_index = 0\n    for example_features in tqdm(\n        features, total=len(features), desc=\"add example index and unique id\", disable=not tqdm_enabled\n    ):\n        if not example_features:\n            continue\n        for example_feature in example_features:\n            example_feature.example_index = example_index\n            example_feature.unique_id = unique_id\n            new_features.append(example_feature)\n            unique_id += 1\n        example_index += 1\n    features = new_features\n    del new_features\n    return features\n    \ndef print_feature(feature):\n    print('input_ids-----',feature.input_ids)\n    print('attention_mask-----',feature.attention_mask)\n    print('token_type_ids-----',feature.token_type_ids)\n    print('cls_index-----',feature.cls_index)\n    print('p_mask-----',feature.p_mask)\n    print('example_index-----',feature.example_index)\n    print('unique_id-----',feature.unique_id)\n    print('paragraph_len-----',feature.paragraph_len)\n    print('token_is_max_context-----',feature.token_is_max_context)\n    print('tokens-----',feature.tokens)\n    print('token_to_orig_map-----',feature.token_to_orig_map)\n    print('start_position-----',feature.start_position)\n    print('end_position-----',feature.end_position)\n    print('is_impossible-----',feature.is_impossible)\n    print('qas_id-----',feature.qas_id)\n\ndef index_to_str(num):\n    return '0'*(3-len(str(num)))+str(num)\n\ndef bioasq_to_squad(input_json,training):\n    with open(input_json, \"r\", encoding=\"utf-8\") as reader:\n        input_data = json.load(reader)[\"questions\"]\n    squad_entries=[]\n    for 
entry in tqdm(input_data):\n        if entry['type']!='factoid': #only extract questions of type 'factoid'\n            continue\n        question=entry['body'].strip()\n        if training:\n            answer_text=entry['exact_answer'][0].strip()\n        else:\n            answer_text=entry['exact_answer'][0][0].strip() \n        for index,snippet in enumerate(entry[\"snippets\"]):\n            id_num=entry['id']+'_'+index_to_str(index+1)\n            context = snippet[\"text\"].strip()\n            start=context.find(answer_text)\n            if start!=-1:\n                answer={\"text\":answer_text,\"answer_start\":start}\n            else:\n                continue\n            new_entry={\n                \"qas\": [\n                    {\n                        \"id\": id_num,\n                        \"question\": question,\n                        \"answers\": [answer]\n                    }\n                ],\n                \"context\": context\n            }\n            squad_entries.append(new_entry)\n    return squad_entries\n\ndef entry_to_json(squad_entries,title=None,version=None):\n    squad_json={\n        \"data\": [\n            {\n            \"paragraphs\":squad_entries,\n            \"title\":title\n            }],\n        \"version\":version\n    }\n    return squad_json\n\n\nclass SquadResult:\n    \"\"\"\n    Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.\n    Args:\n        unique_id: The unique identifier corresponding to that example.\n        start_logits: The logits corresponding to the start of the answer\n        end_logits: The logits corresponding to the end of the answer\n    \"\"\"\n\n    def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):\n        self.start_logits = start_logits\n        self.end_logits = end_logits\n        self.unique_id = unique_id\n\n        if start_top_index:\n            self.start_top_index = start_top_index\n            self.end_top_index = end_top_index\n            self.cls_logits = cls_logits\n    \n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"--model_type\",default='roberta',type=str)\n    parser.add_argument(\"--model_name_or_path\",default='roberta-base',type=str)\n    parser.add_argument(\"--tokenizer_name\",default=\"\",type=str)\n    parser.add_argument(\"--cache_dir\",default=\"\",type=str)\n    parser.add_argument(\"--max_seq_length\",default=384,type=int)\n    parser.add_argument(\"--doc_stride\",default=128,type=int)\n    parser.add_argument(\"--max_query_length\",default=64,type=int)\n    parser.add_argument(\"--do_lower_case\", action=\"store_true\")\n\n    args, _ = parser.parse_known_args()\n    \n    tokenizer = AutoTokenizer.from_pretrained(\n        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n        do_lower_case=args.do_lower_case,\n        cache_dir=args.cache_dir if args.cache_dir else None)\n    \n    max_seq_length=args.max_seq_length\n    # Use the dedicated --doc_stride argument; the original assigned max_seq_length here,\n    # which silently disabled the overlapping-window stride.\n    doc_stride=args.doc_stride\n    max_query_length=args.max_query_length\n    \n    #load SQuAD train dataset, generate train features, save for future use\n    print(\"Load SQuAD train dataset, generate train features, save for future use\")\n    train_examples=create_examples(\"../input/biomedical-questionanswer/train-v1.1.json\", \"train\")\n    is_training=True\n    train_features=squad_convert_examples_to_features(\n        train_examples,\n        tokenizer,\n        max_seq_length,\n        doc_stride,\n        max_query_length,\n        is_training,\n        padding_strategy=\"max_length\",\n        tqdm_enabled=True)\n    with open('features_training.pickle', 'wb') as handle:\n        pickle.dump(train_features, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    \n    #load SQuAD dev dataset, generate valid features, save for future use\n    print(\"Load SQuAD dev dataset, generate valid features, save for future use\")\n    valid_examples=create_examples(\"../input/biomedical-questionanswer/dev-v1.1.json\", \"valid\")\n    is_training=False\n    valid_features=squad_convert_examples_to_features(\n        valid_examples,\n        tokenizer,\n        
max_seq_length,\n        doc_stride,\n        max_query_length,\n        is_training,\n        padding_strategy=\"max_length\",\n        tqdm_enabled=True)\n    with open('valid_features.pickle', 'wb') as handle:\n        pickle.dump(valid_features, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    with open('valid_examples.pickle', 'wb') as handle:\n        pickle.dump(valid_examples, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    \n    #convert the BioASQ raw data into the same format as SQuAD, generate features, save for future use\n    #train dataset\n    print(\"Convert the BioASQ raw train data into the same format as SQuAD, generate features, save for future use\")\n    trainining7b_entries=bioasq_to_squad(\"../input/biomedical-questionanswer/trainining7b.json\",training=True)\n    trainining7b_squad_json=entry_to_json(trainining7b_entries)\n    with open(\"bioasq7b_squad.json\", \"w\") as outfile:\n        json.dump(trainining7b_squad_json, outfile,indent=2) \n    # Read back the file we just wrote, instead of the (read-only) input directory.\n    train_bioasq_example=create_examples(\"bioasq7b_squad.json\", \"train\")\n    is_training=True\n    train_bioasq_features=squad_convert_examples_to_features(\n        train_bioasq_example,\n        tokenizer,\n        max_seq_length,\n        doc_stride,\n        max_query_length,\n        is_training,\n        padding_strategy=\"max_length\",\n        tqdm_enabled=True)\n    with open('train_bioasq_example.pickle', 'wb') as handle:\n        pickle.dump(train_bioasq_example, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    with open('train_bioasq_features.pickle', 'wb') as handle:\n        pickle.dump(train_bioasq_features, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    \n    #test dataset\n    print(\"Convert the BioASQ raw test data into the same format as SQuAD, generate features, save for future use\")\n    test_file_list=[\"7B1_golden.json\",\"7B2_golden.json\",\"7B3_golden.json\",\"7B4_golden.json\",\"7B5_golden.json\"]\n    all_test_entries=[]\n    i=0\n    for file in test_file_list: \n        filepath=os.path.join(\"../input/biomedical-questionanswer/\",file)\n        entries=bioasq_to_squad(filepath,training=False)\n        all_test_entries+=entries\n    all_test_squad_json=entry_to_json(all_test_entries)\n    with open(\"bioasq7b_gold_squad.json\", \"w\") as outfile:\n        json.dump(all_test_squad_json, outfile,indent=2)\n    # Same here: read the freshly written gold file from the working directory.\n    test_bioasq_example=create_examples(\"bioasq7b_gold_squad.json\", \"test\")\n    is_training=False \n    test_bioasq_features=squad_convert_examples_to_features(\n        test_bioasq_example,\n        tokenizer,\n        max_seq_length,\n        doc_stride,\n        max_query_length,\n        is_training,\n        padding_strategy=\"max_length\",\n        tqdm_enabled=True)\n    with open('test_bioasq_example.pickle', 'wb') as handle:\n        pickle.dump(test_bioasq_example, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    with open('test_bioasq_features.pickle', 'wb') as handle:\n        pickle.dump(test_bioasq_features, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    \n    \nif __name__==\"__main__\":\n    main()\n\n", "repo_name": "EmilyNLP/BioMedical-Question-Answering", "sub_path": "data-squad-bioasq.py", "file_name": "data-squad-bioasq.py", "file_ext": "py", "file_size_in_byte": 28615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "97", "api": [{"api_name": "enum.Enum", "line_number": 75, "usage_type": "name"}, {"api_name": "json.load", "line_number": 224, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 400, "usage_type": "call"}, {"api_name": 
"tqdm.tqdm", "line_number": 486, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 498, "usage_type": "call"}, {"api_name": "json.load", "line_number": 535, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 537, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 599, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 612, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 612, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 635, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 635, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 651, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 651, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 653, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 653, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 661, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 674, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 674, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 676, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 676, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 684, "usage_type": "call"}, {"api_name": "os.path", "line_number": 684, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 689, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 702, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 702, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 704, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 704, "usage_type": "attribute"}]} +{"seq_id": "27517258057", "text": "import json\nimport os\nimport random\nfrom io import BytesIO\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import BadArgument, UserConverter, command\n\nfrom utils.checks import restricted, super_check, mods_only\n\nBASE_URL = \"https://api.weeb.sh\"\nIMAGES_URL = BASE_URL + \"/images\"\nRANDOM_URL = IMAGES_URL + \"/random\"\nGENERATE_URL = BASE_URL + \"/auto-image\"\nSHIP_URL = GENERATE_URL + \"/love-ship\"\nPICTURE_URL_1 = \"https://staging.weeb.sh/auto-image-v2/template/i-love-this-picture\"\nPICTURE_URL_2 = \"https://staging.weeb.sh/auto-image-v2/template/zero-two\"\n\n\nclass Fun:\n def __init__(self, bot):\n self.bot = bot\n self.feedback_channel = bot.get_channel(int(os.environ.get('FEEDBACK_CHANNEL') or bot._config['feedback_channel'])) # noqa: E501\n self.ship_comments = {\n 0: \"These two?? Together?! Don't make me laugh, Darling...\",\n 10: \"Not gonna happen...\",\n 20: \"I really doubt its gonna work out between these two.\",\n 30: \"Give up, this is doomed from the start.\",\n 40: \"It's slim, but you have a chance.\",\n 50: \"Might work, might not. Who knows? Sure as hell not me.\",\n 60: \"You have a real chance here! Give it your all!\",\n 70: \"Oh, I'm on-board all right!\",\n 80: \"Its a match made in heaven! Now kiss~\",\n 90: \"How come you're not all over eachother already!? You're a perfect couple!\",\n 100: \"This is how the gods meet their partners. 
It'd be a shame for mankind if you two don't get together.\"\n }\n\n async def generate_picture(self, url):\n body = json.dumps({'image': url})\n headers = {'Authorization': self.bot.img_auth, 'Content-Type': 'application/json'}\n resp = await self.bot.session.post(random.choice([PICTURE_URL_1, PICTURE_URL_2]), headers=headers, data=body)\n f = discord.File(BytesIO(await resp.read()), \"picture.png\")\n return f\n\n async def generate_ship(self, m1, m2):\n body = json.dumps({\"targetOne\": m1.avatar_url, \"targetTwo\": m2.avatar_url})\n headers = {'Authorization': self.bot.img_auth, 'Content-Type': 'application/json'}\n resp = await self.bot.session.post(SHIP_URL, headers=headers, data=body)\n\n f = discord.File(BytesIO(await resp.read()), \"ship.png\")\n embed = discord.Embed().set_image(url=\"attachment://ship.png\")\n return embed, f\n\n async def make_embed(self, title, type_):\n resp = await self.bot.session.get(RANDOM_URL, params={'type': type_},\n headers={'Authorization': self.bot.img_auth})\n data = await resp.json()\n url = data['url']\n\n return discord.Embed(title=title) \\\n .set_image(url=url) \\\n .set_footer(text=\"Powered by weeb.sh\")\n\n @command()\n @restricted()\n async def ship(self, ctx, user1: discord.Member = None, user2: discord.Member = None):\n \"\"\"Could these two be a good couple? What are the chances?\"\"\"\n\n if user1 is None:\n user1 = ctx.author\n user2 = random.choice(ctx.guild.members) # can disallow bots but it might be funny this way\n elif user2 is None:\n user2 = user1\n user1 = ctx.author\n\n random.seed(user1.id + user2.id)\n chance = random.randint(0, 100)\n comment = self.ship_comments[round(chance, -1)]\n embed, f = await self.generate_ship(user1, user2)\n embed.title = f\"Ship chance for {user1.name} and {user2.name}\"\n embed.description = f\"{chance}%!\\n{comment}\"\n await ctx.send(embed=embed, file=f)\n\n @ship.error\n async def rand_error(self, ctx, err):\n if isinstance(err, commands.BadArgument):\n return await ctx.send(\"I can't recognize who that is! Did you misspell their name, Darling?\")\n else:\n await self.bot.error_handler(ctx, err)\n\n @command()\n @restricted()\n async def sync(self, ctx, thing1, thing2=None):\n \"\"\"What is the sync % between these two??\"\"\"\n\n if thing2 is None:\n thing2 = thing1\n thing1 = ctx.author.name\n\n random.seed(len(thing1) + len(thing2))\n chance = random.randint(0, 100)\n await ctx.send(f\"**{thing1}** and **{thing2}** have a sync rate of {chance}%!\")\n\n @command()\n @restricted()\n async def pat(self, ctx, user: discord.Member = None):\n \"\"\"Pat someone!\"\"\"\n\n if user == ctx.author:\n return await ctx.send(f\"{ctx.author.name} patted... Themselves? For some reason.\")\n elif user is None:\n return await ctx.send(f\"{ctx.author.name} patted.... No one!\")\n\n embed = await self.make_embed(f\"{ctx.author.name} patted {user.name}!\", 'pat')\n await ctx.send(embed=embed)\n\n @command()\n @restricted()\n async def kiss(self, ctx, user: discord.Member = None):\n \"\"\"Kiss someone!\"\"\"\n\n if user == ctx.author:\n return await ctx.send(f\"{ctx.author.name} kissed... Themselves? How does that even work??\")\n elif user is None:\n return await ctx.send(f\"{ctx.author.name} kissed.... 
No one!\")\n\n embed = await self.make_embed(f\"{ctx.author.name} kissed {user.name}!\", 'kiss')\n await ctx.send(embed=embed)\n\n @command()\n @restricted()\n async def hug(self, ctx, user: discord.Member = None):\n \"\"\"Hug someone!\"\"\"\n\n if user == ctx.author:\n return await ctx.send(f\"{ctx.author.name} hugged... Themselves? I guess.\")\n elif user is None:\n return await ctx.send(f\"{ctx.author.name} hugged.... No one!\")\n\n embed = await self.make_embed(f\"{ctx.author.name} gave {user.name} a hug!\", 'hug')\n await ctx.send(embed=embed)\n\n @command()\n @restricted()\n async def lick(self, ctx, user: discord.Member = None):\n \"\"\"Lick someone!\"\"\"\n\n if user == ctx.author:\n return await ctx.send(f\"{ctx.author.name} licked... Themselves? What?\")\n elif user is None:\n return await ctx.send(f\"{ctx.author.name} licked.... No one!\")\n\n embed = await self.make_embed(f\"{ctx.author.name} Licked {user.name}!\", 'lick')\n await ctx.send(embed=embed)\n\n @command()\n @restricted()\n async def cuddle(self, ctx, user: discord.Member = None):\n \"\"\"Cuddle someone!\"\"\"\n\n if user == ctx.author:\n return await ctx.send(f\"{ctx.author.name} is cuddling all alone...\")\n elif user is None:\n return await ctx.send(f\"{ctx.author.name} cuddled.... No one!\")\n\n embed = await self.make_embed(f\"{ctx.author.name} cuddled {user.name}!\", 'cuddle')\n await ctx.send(embed=embed)\n\n @command()\n @restricted()\n async def picture(self, ctx, target: discord.Member = None):\n \"\"\"Does Zero Two like this picture?\"\"\"\n\n target = target or ctx.author\n async with ctx.typing():\n f = await self.generate_picture(target.avatar_url_as(format=\"png\", size=256))\n await ctx.send(file=f)\n\n class UserCreated(UserConverter):\n async def convert(self, ctx, arg):\n try:\n u = await super().convert(ctx, str(arg))\n return discord.utils.snowflake_time(u.id)\n except BadArgument:\n if isinstance(arg, int) or arg.isdigit():\n return discord.utils.snowflake_time(int(arg))\n else:\n raise BadArgument(\"Can only convert ID numbers.\")\n\n @command()\n async def feedback(self, ctx, *, msg):\n \"\"\"Send me (Synder#0690) feedback about the bot!\n bugs, feature requests, and any general feedback at all!\n \"\"\"\n embed = discord.Embed(title=\"Feedback recieved\", description=msg) \\\n .set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n await self.feedback_channel.send(embed=embed)\n await ctx.message.add_reaction(\"\\N{WHITE HEAVY CHECK MARK}\")\n\n @command(hidden=True)\n async def age(self, ctx, created: UserCreated):\n \"\"\"Sends the account creation date for a user / member\"\"\"\n\n await ctx.send(created)\n\n @command()\n async def saturday(self, ctx):\n \"\"\"Saturday?\"\"\"\n\n await ctx.send(\"https://www.youtube.com/watch?v=hKqwULUwHFc\")\n\n def generate_message(self, messages):\n words = messages.split(' ')\n index = 1\n chain = {}\n for word in words[index:]:\n key = words[index - 1]\n if key in chain:\n chain[key].append(word)\n else:\n chain[key] = [word]\n index += 1\n\n word1 = random.choice(list(chain.keys()))\n message = word1.capitalize()\n count = 45\n while len(message.split(' ')) < count:\n word2 = random.choice(chain[word1])\n word1 = word2\n message += ' ' + word2\n\n if not message.endswith('.'):\n message += \".\"\n\n return message\n\n @command(aliases=[\"makechain\", \"channelchain\", \"makemessage\"])\n @super_check(mods_only)\n async def scramble(self, ctx, channel: discord.TextChannel = None, *members: discord.Member):\n \"\"\"Generates a message based 
on the last 1000 messages in a specified channel\n (or the current one if none was given).\n \"\"\"\n channel = channel or ctx.channel\n if channel.is_nsfw() and not ctx.channel.is_nsfw():\n return await ctx.send(\"Cannot post nsfw content in non-nsfw channels.\")\n async with ctx.typing():\n if not members:\n msgs = [m.clean_content async for m in channel.history(limit=1000)]\n else:\n msgs = []\n c = 0\n async for m in channel.history(limit=None):\n if m.author not in members:\n continue\n msgs.append(m.clean_content)\n c += 1\n if c == 1000:\n break\n\n msg = await self.bot.loop.run_in_executor(None, self.generate_message, \" \".join(msgs))\n if len(msg) >= 2000:\n await ctx.send(\"Result was too large! Posting a part of it.\")\n msg = msg[:2000]\n await ctx.send(msg)\n\n\ndef setup(bot):\n bot.add_cog(Fun(bot))\n", "repo_name": "doron-goldstein/02-bot", "sub_path": "cogs/fun.py", "file_name": "fun.py", "file_ext": "py", "file_size_in_byte": 10298, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.environ.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 42, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 43, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 43, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 51, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 51, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 52, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 61, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 67, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 72, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 77, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 78, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 66, "usage_type": "call"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 87, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 87, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 101, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 102, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 93, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 107, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 105, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 106, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 120, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 119, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 133, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 131, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 132, 
"usage_type": "call"}, {"api_name": "discord.Member", "line_number": 146, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 144, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 145, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 159, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 157, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 158, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 172, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 170, "usage_type": "call"}, {"api_name": "utils.checks.restricted", "line_number": 171, "usage_type": "call"}, {"api_name": "discord.ext.commands.UserConverter", "line_number": 180, "usage_type": "name"}, {"api_name": "discord.utils.snowflake_time", "line_number": 184, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 184, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 185, "usage_type": "name"}, {"api_name": "discord.utils.snowflake_time", "line_number": 187, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 187, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 189, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 196, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 191, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 201, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 207, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 225, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 229, "usage_type": "call"}, {"api_name": "discord.TextChannel", "line_number": 240, "usage_type": "attribute"}, {"api_name": "discord.Member", "line_number": 240, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 238, "usage_type": "call"}, {"api_name": "utils.checks.super_check", "line_number": 239, "usage_type": "call"}, {"api_name": "utils.checks.mods_only", "line_number": 239, "usage_type": "argument"}]} +{"seq_id": "5619460869", "text": "import moviepy.editor as mp\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(\"Process video into train data\")\n# -- Create the descriptions for the commands\nv_desc = \"The location of the input video\"\n\n# -- Create the arguments\nparser.add_argument(\n \"-v\", help=v_desc, default=\"/Volumes/gordonssd/tiktok/videos/letskillthislove.mp4\")\n\nargs = parser.parse_args()\nbasename = os.path.basename(args.v)\nvideoname = os.path.splitext(basename)[0]\n\nclip = mp.VideoFileClip(args.v)\n\nfor i, j in enumerate(range(0, int(clip.duration), 15)):\n if j+15 < clip.duration:\n snippet = clip.subclip(j, j+15)\n snippet.write_videofile(\n f\"/Volumes/gordonssd/tiktok/train_set/video/{videoname}{i}.mp4\", temp_audiofile=\"temp-audio.m4a\", remove_temp=True, audio_codec=\"aac\")\n", "repo_name": "gordon-lim/AI-Learns-to-TikTok", "sub_path": "scripts/process_video.py", "file_name": "process_video.py", "file_ext": "py", "file_size_in_byte": 792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "94", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 14, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "moviepy.editor.VideoFileClip", "line_number": 17, "usage_type": "call"}, {"api_name": "moviepy.editor", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "4008568008", "text": "#!/usr/bin/env python3\n\nimport os\nfrom pathlib import Path\nimport numpy as np\nimport random\n\nimport hydra\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom drlnd.core.common.logger import get_default_logger\nfrom drlnd.core.common.replay_buffer import ReplayBuffer\nfrom drlnd.core.common.prioritized_replay_buffer import PrioritizedReplayBuffer\nfrom drlnd.core.agents.base_agent import AgentBase\nfrom drlnd.core.common.util import count_boundaries\n\nlogger = get_default_logger()\n\n\nclass DQNAgentSettings(dict):\n def __init__(self, **kwargs):\n self.buffer_size = int(5e4) # replay buffer size\n self.batch_size = 32 # minibatch size\n self.gamma = 1.0 # discount factor\n self.learning_rate = 5e-4 # learning rate\n self.update_period = 1 # how often to update the network\n # for soft update of target parameters\n self.train_delay_step = 10\n self.target_update_factor = 0.01\n self.target_update_period = 8\n self.device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n self.network = {'class': 'drlnd.core.networks.mlp.QNetworkMLP'}\n self.seed = None\n self.__dict__.update(kwargs)\n dict.__init__(self, self.__dict__)\n\n def __str__(self):\n return self.__dict__.__str__()\n\n def __repr__(self):\n return self.__dict__.__repr__()\n\n\nclass DQNAgent(AgentBase):\n \"\"\"Interacts with and learns from the environment.\"\"\"\n\n def __init__(self, state_size, action_size, **kwargs):\n \"\"\"Initialize an Agent object.\n\n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.settings = DQNAgentSettings(**kwargs)\n self.seed = self.settings.seed\n random.seed(self.seed)\n\n # Q-Network\n logger.debug('Settings : {}'.format(self.settings))\n logger.debug('Loading Network as : {}'.format(self.settings.network))\n self.qnetwork_local = hydra.utils.instantiate(self.settings.network, state_size, action_size).to(\n self.settings.device)\n self.qnetwork_target = hydra.utils.instantiate(self.settings.network, state_size, action_size).to(\n self.settings.device)\n self.optimizer = optim.Adam(\n self.qnetwork_local.parameters(), lr=self.settings.learning_rate)\n\n # Replay memory\n # self.memory = ReplayBuffer(\n # state_size, action_size, int(self.settings.buffer_size), self.settings.batch_size, self.settings.seed)\n self.memory = PrioritizedReplayBuffer(\n state_size, action_size, int(self.settings.buffer_size), self.settings.batch_size, self.settings.seed)\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n\n def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory.\n # TODO(yycho0108): validate batch_dim equal for all inputs.\n batch_dim = state.shape[:-len(self.state_size)]\n if len(batch_dim) == 0:\n self.memory.add(state, action, reward, next_state, done)\n else:\n self.memory.extend(state, action, reward, next_state, done)\n step_size = 1 if len(batch_dim) == 0 else len(state)\n prev_step = self.t_step\n 
self.t_step += step_size\n\n # Skip learning if insufficient memory size.\n should_learn = (len(self.memory) >= self.settings.batch_size and\n self.t_step > self.settings.train_delay_step)\n\n # If learning is not enabled, just apply the step size here and return.\n if not should_learn:\n return\n\n # Learn every `update_period` time steps.\n # NOTE(yycho0108): for parallalized runs, number of added steps\n # may not equal to 1. In such a case, the number of updates\n # should be determined based on the step size.\n num_updates = count_boundaries(\n prev_step, step_size, self.settings.update_period)\n for _ in range(num_updates):\n experiences, indices = self.memory.sample()\n experiences = [torch.from_numpy(e).to(\n self.settings.device) for e in experiences]\n q_error = self.learn(experiences, self.settings.gamma)\n if isinstance(self.memory, PrioritizedReplayBuffer):\n with torch.no_grad():\n # FIXME(yycho0108): 1e-6 -> kMinPriority?\n new_priorities = q_error.abs() + 1e-6\n self.memory.update_priorities(\n indices, new_priorities.cpu().numpy())\n\n # Update target network similarly every `target_update_period` time steps.\n # Here, similar logic is applied to count the number of target updates.\n # Note that here the update factor is pre-calculated\n # as an exponential and only applied once.\n num_target_updates = count_boundaries(\n prev_step, step_size, self.settings.target_update_period)\n if num_target_updates > 0:\n target_update_factor = (1.0 -\n (1.0 - self.settings.target_update_factor) ** num_target_updates)\n self.soft_update(self.qnetwork_local,\n self.qnetwork_target, target_update_factor)\n\n def select_action(self, state, eps=0.):\n \"\"\"Returns actions for given state as per current policy.\n\n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n \"\"\"\n batch_dim = state.shape[:-len(self.state_size)]\n state = torch.from_numpy(state).float().to(self.settings.device)\n if len(batch_dim) == 0:\n state = state.unsqueeze(0)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n p = np.random.uniform(0.0, 1.0, size=batch_dim)\n action = np.where(p > eps,\n np.argmax(action_values.cpu().data.numpy(), axis=-1),\n np.random.choice(self.action_size, size=batch_dim))\n if len(batch_dim) == 0:\n action = np.asscalar(np.squeeze(action, 0))\n return action\n\n def learn(self, experiences, gamma):\n \"\"\"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n with torch.no_grad():\n Q_target_next = self.qnetwork_target(\n next_states).detach().max(1)[0].unsqueeze(1)\n rewards.unsqueeze_(1)\n dones.unsqueeze_(1)\n Q_target = rewards + (1 - dones.float()) * (gamma * Q_target_next)\n\n Q_local = self.qnetwork_local(states).gather(\n 1, actions.long().unsqueeze(1))\n\n loss = F.mse_loss(Q_local, Q_target)\n # loss = F.smooth_l1_loss(Q_local, Q_target)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Separately return the error.\n with torch.no_grad():\n Q_error = Q_local - Q_target\n return Q_error.squeeze(1)\n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n 
======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(\n tau*local_param.data + (1.0-tau)*target_param.data)\n\n @staticmethod\n def _get_checkpoint(path, i=None, search=False):\n # Try with default first.\n if i is None:\n filename = '{}/checkpoint.pth'.format(path)\n else:\n filename = '{}/checkpoint-{}.pth'.format(path, i)\n\n # Optionally, fallback to search directory for any available checkpoint file.\n if not Path(filename).exists() and search:\n files = Path(path).glob('**/*.pth')\n filename = sorted(files, key=os.path.getmtime)[-1]\n logger.info('Fallback to loading from : {}'.format(filename))\n\n return filename\n\n def load(self, path='', i=None):\n filename = self._get_checkpoint(path, i, search=True)\n state_dict = torch.load(filename)\n logger.info('Agent loading from : {}'.format(filename))\n self.qnetwork_local.load_state_dict(state_dict)\n\n def save(self, path='', i=None):\n filename = self._get_checkpoint(path, i, search=False)\n logger.info('Agent saving to : {}'.format(filename))\n torch.save(self.qnetwork_local.state_dict(), filename)\n", "repo_name": "yycho0108/DRLND_Core", "sub_path": "drlnd/core/agents/dqn_agent.py", "file_name": "dqn_agent.py", "file_ext": "py", "file_size_in_byte": 9281, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "drlnd.core.common.logger.get_default_logger", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "drlnd.core.agents.base_agent.AgentBase", "line_number": 46, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 62, "usage_type": "call"}, {"api_name": "hydra.utils.instantiate", "line_number": 67, "usage_type": "call"}, {"api_name": "hydra.utils", "line_number": 67, "usage_type": "attribute"}, {"api_name": "hydra.utils.instantiate", "line_number": 69, "usage_type": "call"}, {"api_name": "hydra.utils", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 71, "usage_type": "name"}, {"api_name": "drlnd.core.common.prioritized_replay_buffer.PrioritizedReplayBuffer", "line_number": 77, "usage_type": "call"}, {"api_name": "drlnd.core.common.util.count_boundaries", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 110, "usage_type": "call"}, {"api_name": "drlnd.core.common.prioritized_replay_buffer.PrioritizedReplayBuffer", "line_number": 113, "usage_type": "argument"}, {"api_name": "torch.no_grad", "line_number": 114, "usage_type": "call"}, {"api_name": "drlnd.core.common.util.count_boundaries", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 152, "usage_type": "call"}, {"api_name": 
"numpy.random.choice", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.asscalar", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 186, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 213, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 214, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "8105158196", "text": "import decimal\nfrom unittest import TestCase\n\nimport pydantic\nfrom pydantic import ValidationError\n\nfrom piccolo.columns import (\n JSON,\n JSONB,\n Array,\n Integer,\n Numeric,\n Secret,\n Text,\n Varchar,\n)\nfrom piccolo.columns.column_types import ForeignKey\nfrom piccolo.table import Table\nfrom piccolo.utils.pydantic import create_pydantic_model\n\n\nclass TestVarcharColumn(TestCase):\n def test_varchar_length(self):\n class Director(Table):\n name = Varchar(length=10)\n\n pydantic_model = create_pydantic_model(table=Director)\n\n with self.assertRaises(ValidationError):\n pydantic_model(name=\"This is a really long name\")\n\n pydantic_model(name=\"short name\")\n\n\nclass TestNumericColumn(TestCase):\n \"\"\"\n Numeric and Decimal are the same - so we'll just test Numeric.\n \"\"\"\n\n def test_numeric_digits(self):\n class Movie(Table):\n box_office = Numeric(digits=(5, 1))\n\n pydantic_model = create_pydantic_model(table=Movie)\n\n with self.assertRaises(ValidationError):\n # This should fail as there are too much numbers after the decimal\n # point\n pydantic_model(box_office=decimal.Decimal(\"1.11\"))\n\n with self.assertRaises(ValidationError):\n # This should fail as there are too much numbers in total\n pydantic_model(box_office=decimal.Decimal(\"11111.1\"))\n\n pydantic_model(box_office=decimal.Decimal(\"1.0\"))\n\n def test_numeric_without_digits(self):\n class Movie(Table):\n box_office = Numeric()\n\n try:\n create_pydantic_model(table=Movie)\n except TypeError:\n self.fail(\n \"Creating numeric field without\"\n \" digits failed in pydantic model.\"\n )\n else:\n self.assertTrue(True)\n\n\nclass TestSecretColumn(TestCase):\n def test_secret_param(self):\n class TopSecret(Table):\n confidential = Secret()\n\n pydantic_model = create_pydantic_model(table=TopSecret)\n self.assertEqual(\n pydantic_model.schema()[\"properties\"][\"confidential\"][\"extra\"][\n \"secret\"\n ],\n True,\n )\n\n\nclass TestArrayColumn(TestCase):\n def test_array_param(self):\n class Band(Table):\n members = Array(base_column=Varchar(length=16))\n\n pydantic_model = create_pydantic_model(table=Band)\n\n self.assertEqual(\n pydantic_model.schema()[\"properties\"][\"members\"][\"items\"][\"type\"],\n \"string\",\n )\n\n\nclass TestForeignKeyColumn(TestCase):\n def test_target_column(self):\n \"\"\"\n Make sure the target_column is correctly set in the Pydantic schema.\n \"\"\"\n\n class Manager(Table):\n name = Varchar(unique=True)\n\n class BandA(Table):\n manager = ForeignKey(Manager, 
target_column=Manager.name)\n\n class BandB(Table):\n manager = ForeignKey(Manager, target_column=\"name\")\n\n class BandC(Table):\n manager = ForeignKey(Manager)\n\n self.assertEqual(\n create_pydantic_model(table=BandA).schema()[\"properties\"][\n \"manager\"\n ][\"extra\"][\"target_column\"],\n \"name\",\n )\n\n self.assertEqual(\n create_pydantic_model(table=BandB).schema()[\"properties\"][\n \"manager\"\n ][\"extra\"][\"target_column\"],\n \"name\",\n )\n\n self.assertEqual(\n create_pydantic_model(table=BandC).schema()[\"properties\"][\n \"manager\"\n ][\"extra\"][\"target_column\"],\n \"id\",\n )\n\n\nclass TestTextColumn(TestCase):\n def test_text_format(self):\n class Band(Table):\n bio = Text()\n\n pydantic_model = create_pydantic_model(table=Band)\n\n self.assertEqual(\n pydantic_model.schema()[\"properties\"][\"bio\"][\"format\"],\n \"text-area\",\n )\n\n\nclass TestColumnHelpText(TestCase):\n \"\"\"\n Make sure that columns with `help_text` attribute defined have the\n relevant text appear in the schema.\n \"\"\"\n\n def test_help_text_present(self):\n help_text = \"In millions of US dollars.\"\n\n class Movie(Table):\n box_office = Numeric(digits=(5, 1), help_text=help_text)\n\n pydantic_model = create_pydantic_model(table=Movie)\n self.assertEqual(\n pydantic_model.schema()[\"properties\"][\"box_office\"][\"extra\"][\n \"help_text\"\n ],\n help_text,\n )\n\n\nclass TestTableHelpText(TestCase):\n \"\"\"\n Make sure that tables with `help_text` attribute defined have the\n relevant text appear in the schema.\n \"\"\"\n\n def test_help_text_present(self):\n help_text = \"Movies which were released in cinemas.\"\n\n class Movie(Table, help_text=help_text):\n name = Varchar()\n\n pydantic_model = create_pydantic_model(table=Movie)\n self.assertEqual(\n pydantic_model.schema()[\"help_text\"],\n help_text,\n )\n\n\nclass TestJSONColumn(TestCase):\n def test_default(self):\n class Movie(Table):\n meta = JSON()\n meta_b = JSONB()\n\n pydantic_model = create_pydantic_model(table=Movie)\n\n json_string = '{\"code\": 12345}'\n\n model_instance = pydantic_model(meta=json_string, meta_b=json_string)\n self.assertEqual(model_instance.meta, json_string)\n self.assertEqual(model_instance.meta_b, json_string)\n\n def test_deserialize_json(self):\n class Movie(Table):\n meta = JSON()\n meta_b = JSONB()\n\n pydantic_model = create_pydantic_model(\n table=Movie, deserialize_json=True\n )\n\n json_string = '{\"code\": 12345}'\n output = {\"code\": 12345}\n\n model_instance = pydantic_model(meta=json_string, meta_b=json_string)\n self.assertEqual(model_instance.meta, output)\n self.assertEqual(model_instance.meta_b, output)\n\n def test_validation(self):\n class Movie(Table):\n meta = JSON()\n meta_b = JSONB()\n\n for deserialize_json in (True, False):\n pydantic_model = create_pydantic_model(\n table=Movie, deserialize_json=deserialize_json\n )\n\n json_string = \"error\"\n\n with self.assertRaises(pydantic.ValidationError):\n pydantic_model(meta=json_string, meta_b=json_string)\n\n def test_json_format(self):\n class Movie(Table):\n features = JSON()\n\n pydantic_model = create_pydantic_model(table=Movie)\n\n self.assertEqual(\n pydantic_model.schema()[\"properties\"][\"features\"][\"format\"],\n \"json\",\n )\n\n\nclass TestExcludeColumns(TestCase):\n def test_all(self):\n class Computer(Table):\n CPU = Varchar()\n GPU = Varchar()\n\n pydantic_model = create_pydantic_model(Computer, exclude_columns=())\n\n properties = pydantic_model.schema()[\"properties\"]\n 
self.assertIsInstance(properties[\"GPU\"], dict)\n self.assertIsInstance(properties[\"CPU\"], dict)\n\n def test_exclude(self):\n class Computer(Table):\n CPU = Varchar()\n GPU = Varchar()\n\n pydantic_model = create_pydantic_model(\n Computer,\n exclude_columns=(Computer.CPU,),\n )\n\n properties = pydantic_model.schema()[\"properties\"]\n self.assertIsInstance(properties.get(\"GPU\"), dict)\n self.assertIsNone(properties.get(\"CPU\"))\n\n def test_exclude_all_manually(self):\n class Computer(Table):\n GPU = Varchar()\n CPU = Varchar()\n\n pydantic_model = create_pydantic_model(\n Computer,\n exclude_columns=(Computer.GPU, Computer.CPU),\n )\n\n self.assertEqual(pydantic_model.schema()[\"properties\"], {})\n\n def test_exclude_all_meta(self):\n class Computer(Table):\n GPU = Varchar()\n CPU = Varchar()\n\n pydantic_model = create_pydantic_model(\n Computer,\n exclude_columns=tuple(Computer._meta.columns),\n )\n\n self.assertEqual(pydantic_model.schema()[\"properties\"], {})\n\n def test_invalid_column_str(self):\n class Computer(Table):\n CPU = Varchar()\n GPU = Varchar()\n\n with self.assertRaises(ValueError):\n create_pydantic_model(\n Computer,\n exclude_columns=(\"CPU\",),\n )\n\n def test_invalid_column_different_table(self):\n class Computer(Table):\n CPU = Varchar()\n GPU = Varchar()\n\n class Computer2(Table):\n SSD = Varchar()\n\n with self.assertRaises(ValueError):\n create_pydantic_model(Computer, exclude_columns=(Computer2.SSD,))\n\n def test_invalid_column_different_table_same_type(self):\n class Computer(Table):\n CPU = Varchar()\n GPU = Varchar()\n\n class Computer2(Table):\n CPU = Varchar()\n\n with self.assertRaises(ValueError):\n create_pydantic_model(Computer, exclude_columns=(Computer2.CPU,))\n\n def test_exclude_nested(self):\n class Manager(Table):\n name = Varchar()\n phone_number = Integer()\n\n class Band(Table):\n name = Varchar()\n manager = ForeignKey(Manager)\n popularity = Integer()\n\n pydantic_model = create_pydantic_model(\n table=Band,\n exclude_columns=(\n Band.popularity,\n Band.manager.phone_number,\n ),\n nested=(Band.manager,),\n )\n\n model_instance = pydantic_model(\n name=\"Pythonistas\", manager={\"name\": \"Guido\"}\n )\n self.assertEqual(\n model_instance.dict(),\n {\"name\": \"Pythonistas\", \"manager\": {\"name\": \"Guido\"}},\n )\n\n\nclass TestIncludeColumns(TestCase):\n def test_include(self):\n class Band(Table):\n name = Varchar()\n popularity = Integer()\n\n pydantic_model = create_pydantic_model(\n Band,\n include_columns=(Band.name,),\n )\n\n properties = pydantic_model.schema()[\"properties\"]\n self.assertIsInstance(properties.get(\"name\"), dict)\n self.assertIsNone(properties.get(\"popularity\"))\n\n def test_include_exclude_error(self):\n \"\"\"\n An exception should be raised if both `include_columns` and\n `exclude_columns` are provided.\n \"\"\"\n\n class Band(Table):\n name = Varchar()\n popularity = Integer()\n\n with self.assertRaises(ValueError):\n create_pydantic_model(\n Band,\n exclude_columns=(Band.name,),\n include_columns=(Band.name,),\n )\n\n def test_nested(self):\n \"\"\"\n Make sure that columns on related tables work.\n \"\"\"\n\n class Manager(Table):\n name = Varchar()\n phone_number = Integer()\n\n class Band(Table):\n name = Varchar()\n manager = ForeignKey(Manager)\n popularity = Integer()\n\n pydantic_model = create_pydantic_model(\n table=Band,\n include_columns=(\n Band.name,\n Band.manager.name,\n ),\n nested=(Band.manager,),\n )\n\n model_instance = pydantic_model(\n name=\"Pythonistas\", 
manager={\"name\": \"Guido\"}\n )\n self.assertEqual(\n model_instance.dict(),\n {\"name\": \"Pythonistas\", \"manager\": {\"name\": \"Guido\"}},\n )\n\n\nclass TestNestedModel(TestCase):\n def test_true(self):\n \"\"\"\n Make sure all foreign key columns are converted to nested models, when\n `nested=True`.\n \"\"\"\n\n class Country(Table):\n name = Varchar(length=10)\n\n class Manager(Table):\n name = Varchar(length=10)\n country = ForeignKey(Country)\n\n class Band(Table):\n name = Varchar(length=10)\n manager = ForeignKey(Manager)\n\n BandModel = create_pydantic_model(table=Band, nested=True)\n\n #######################################################################\n\n ManagerModel = BandModel.__fields__[\"manager\"].type_\n self.assertTrue(issubclass(ManagerModel, pydantic.BaseModel))\n self.assertEqual(\n [i for i in ManagerModel.__fields__.keys()], [\"name\", \"country\"]\n )\n\n #######################################################################\n\n CountryModel = ManagerModel.__fields__[\"country\"].type_\n self.assertTrue(issubclass(CountryModel, pydantic.BaseModel))\n self.assertEqual([i for i in CountryModel.__fields__.keys()], [\"name\"])\n\n def test_tuple(self):\n \"\"\"\n Make sure only the specified foreign key columns are converted to\n nested models.\n \"\"\"\n\n class Country(Table):\n name = Varchar()\n\n class Manager(Table):\n name = Varchar()\n country = ForeignKey(Country)\n\n class Band(Table):\n name = Varchar()\n manager = ForeignKey(Manager)\n assistant_manager = ForeignKey(Manager)\n\n class Venue(Table):\n name = Varchar()\n\n class Concert(Table):\n band_1 = ForeignKey(Band)\n band_2 = ForeignKey(Band)\n venue = ForeignKey(Venue)\n\n #######################################################################\n # Test one level deep\n\n BandModel = create_pydantic_model(table=Band, nested=(Band.manager,))\n\n ManagerModel = BandModel.__fields__[\"manager\"].type_\n self.assertTrue(issubclass(ManagerModel, pydantic.BaseModel))\n self.assertEqual(\n [i for i in ManagerModel.__fields__.keys()], [\"name\", \"country\"]\n )\n self.assertEqual(ManagerModel.__qualname__, \"Band.manager\")\n\n AssistantManagerType = BandModel.__fields__[\"assistant_manager\"].type_\n self.assertTrue(AssistantManagerType is int)\n\n #######################################################################\n # Test two levels deep\n\n BandModel = create_pydantic_model(\n table=Band, nested=(Band.manager.country,)\n )\n\n ManagerModel = BandModel.__fields__[\"manager\"].type_\n self.assertTrue(issubclass(ManagerModel, pydantic.BaseModel))\n self.assertEqual(\n [i for i in ManagerModel.__fields__.keys()], [\"name\", \"country\"]\n )\n self.assertEqual(ManagerModel.__qualname__, \"Band.manager\")\n\n AssistantManagerType = BandModel.__fields__[\"assistant_manager\"].type_\n self.assertTrue(AssistantManagerType is int)\n\n CountryModel = ManagerModel.__fields__[\"country\"].type_\n self.assertTrue(issubclass(CountryModel, pydantic.BaseModel))\n self.assertEqual([i for i in CountryModel.__fields__.keys()], [\"name\"])\n self.assertEqual(CountryModel.__qualname__, \"Band.manager.country\")\n\n #######################################################################\n # Test three levels deep\n\n ConcertModel = create_pydantic_model(\n Concert, nested=(Concert.band_1.manager,)\n )\n\n VenueModel = ConcertModel.__fields__[\"venue\"].type_\n self.assertTrue(VenueModel is int)\n\n BandModel = ConcertModel.__fields__[\"band_1\"].type_\n self.assertTrue(issubclass(BandModel, pydantic.BaseModel))\n 
self.assertEqual(\n [i for i in BandModel.__fields__.keys()],\n [\"name\", \"manager\", \"assistant_manager\"],\n )\n self.assertEqual(BandModel.__qualname__, \"Concert.band_1\")\n\n ManagerModel = BandModel.__fields__[\"manager\"].type_\n self.assertTrue(issubclass(ManagerModel, pydantic.BaseModel))\n self.assertEqual(\n [i for i in ManagerModel.__fields__.keys()],\n [\"name\", \"country\"],\n )\n self.assertEqual(ManagerModel.__qualname__, \"Concert.band_1.manager\")\n\n AssistantManagerType = BandModel.__fields__[\"assistant_manager\"].type_\n self.assertTrue(AssistantManagerType is int)\n\n CountryModel = ManagerModel.__fields__[\"country\"].type_\n self.assertTrue(CountryModel is int)\n\n #######################################################################\n # Test with `model_name` arg\n\n MyConcertModel = create_pydantic_model(\n Concert,\n nested=(Concert.band_1.manager,),\n model_name=\"MyConcertModel\",\n )\n\n BandModel = MyConcertModel.__fields__[\"band_1\"].type_\n self.assertEqual(BandModel.__qualname__, \"MyConcertModel.band_1\")\n\n ManagerModel = BandModel.__fields__[\"manager\"].type_\n self.assertEqual(\n ManagerModel.__qualname__, \"MyConcertModel.band_1.manager\"\n )\n\n def test_cascaded_args(self):\n \"\"\"\n Make sure that arguments passed to ``create_pydantic_model`` are\n cascaded to nested models.\n \"\"\"\n\n class Country(Table):\n name = Varchar(length=10)\n\n class Manager(Table):\n name = Varchar(length=10)\n country = ForeignKey(Country)\n\n class Band(Table):\n name = Varchar(length=10)\n manager = ForeignKey(Manager)\n\n BandModel = create_pydantic_model(\n table=Band, nested=True, include_default_columns=True\n )\n\n ManagerModel = BandModel.__fields__[\"manager\"].type_\n self.assertTrue(issubclass(ManagerModel, pydantic.BaseModel))\n self.assertEqual(\n [i for i in ManagerModel.__fields__.keys()],\n [\"id\", \"name\", \"country\"],\n )\n\n CountryModel = ManagerModel.__fields__[\"country\"].type_\n self.assertTrue(issubclass(CountryModel, pydantic.BaseModel))\n self.assertEqual(\n [i for i in CountryModel.__fields__.keys()], [\"id\", \"name\"]\n )\n\n\nclass TestRecursionDepth(TestCase):\n def test_max(self):\n class Country(Table):\n name = Varchar()\n\n class Manager(Table):\n name = Varchar()\n country = ForeignKey(Country)\n\n class Band(Table):\n name = Varchar()\n manager = ForeignKey(Manager)\n assistant_manager = ForeignKey(Manager)\n\n class Venue(Table):\n name = Varchar()\n\n class Concert(Table):\n band = ForeignKey(Band)\n venue = ForeignKey(Venue)\n\n ConcertModel = create_pydantic_model(\n table=Concert, nested=True, max_recursion_depth=2\n )\n\n VenueModel = ConcertModel.__fields__[\"venue\"].type_\n self.assertTrue(issubclass(VenueModel, pydantic.BaseModel))\n\n BandModel = ConcertModel.__fields__[\"band\"].type_\n self.assertTrue(issubclass(BandModel, pydantic.BaseModel))\n\n ManagerModel = BandModel.__fields__[\"manager\"].type_\n self.assertTrue(issubclass(ManagerModel, pydantic.BaseModel))\n\n # We should have hit the recursion depth:\n CountryModel = ManagerModel.__fields__[\"country\"].type_\n self.assertTrue(CountryModel is int)\n\n\nclass TestDBColumnName(TestCase):\n def test_db_column_name(self):\n \"\"\"\n Make sure that the Pydantic model has an alias if ``db_column_name``\n is specified for a column.\n \"\"\"\n\n class Band(Table):\n name = Varchar(db_column_name=\"regrettable_column_name\")\n\n BandModel = create_pydantic_model(table=Band)\n\n model = BandModel(regrettable_column_name=\"test\")\n\n 
self.assertTrue(model.name == \"test\")\n\n\nclass TestSchemaExtraKwargs(TestCase):\n def test_schema_extra_kwargs(self):\n \"\"\"\n Make sure that the ``schema_extra_kwargs`` arguments are reflected in\n Pydantic model's schema.\n \"\"\"\n\n class Band(Table):\n name = Varchar()\n\n model = create_pydantic_model(Band, visible_columns=(\"name\",))\n self.assertEqual(model.schema()[\"visible_columns\"], (\"name\",))\n", "repo_name": "vivekviram123/piccolo_01", "sub_path": "piccolo_legal/lib/python3.8/site-packages/tests/utils/test_pydantic.py", "file_name": "test_pydantic.py", "file_ext": "py", "file_size_in_byte": 20099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "unittest.TestCase", "line_number": 22, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 24, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 25, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 27, "usage_type": "call"}, {"api_name": "pydantic.ValidationError", "line_number": 29, "usage_type": "argument"}, {"api_name": "unittest.TestCase", "line_number": 35, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 41, "usage_type": "name"}, {"api_name": "piccolo.columns.Numeric", "line_number": 42, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 44, "usage_type": "call"}, {"api_name": "pydantic.ValidationError", "line_number": 46, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 49, "usage_type": "call"}, {"api_name": "pydantic.ValidationError", "line_number": 51, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 53, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 55, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 58, "usage_type": "name"}, {"api_name": "piccolo.columns.Numeric", "line_number": 59, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 62, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 72, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 74, "usage_type": "name"}, {"api_name": "piccolo.columns.Secret", "line_number": 75, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 77, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 86, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 88, "usage_type": "name"}, {"api_name": "piccolo.columns.Array", "line_number": 89, "usage_type": "call"}, {"api_name": "piccolo.columns.Varchar", "line_number": 89, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 91, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 99, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 105, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 106, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 108, "usage_type": "name"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 109, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 111, "usage_type": "name"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 112, "usage_type": "call"}, {"api_name": 
"piccolo.table.Table", "line_number": 114, "usage_type": "name"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 115, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 118, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 125, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 132, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 139, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 141, "usage_type": "name"}, {"api_name": "piccolo.columns.Text", "line_number": 142, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 144, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 152, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 161, "usage_type": "name"}, {"api_name": "piccolo.columns.Numeric", "line_number": 162, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 164, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 173, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 182, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 183, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 185, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 192, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 194, "usage_type": "name"}, {"api_name": "piccolo.columns.JSON", "line_number": 195, "usage_type": "call"}, {"api_name": "piccolo.columns.JSONB", "line_number": 196, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 198, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 207, "usage_type": "name"}, {"api_name": "piccolo.columns.JSON", "line_number": 208, "usage_type": "call"}, {"api_name": "piccolo.columns.JSONB", "line_number": 209, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 211, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 223, "usage_type": "name"}, {"api_name": "piccolo.columns.JSON", "line_number": 224, "usage_type": "call"}, {"api_name": "piccolo.columns.JSONB", "line_number": 225, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 228, "usage_type": "call"}, {"api_name": "pydantic.ValidationError", "line_number": 234, "usage_type": "attribute"}, {"api_name": "piccolo.table.Table", "line_number": 238, "usage_type": "name"}, {"api_name": "piccolo.columns.JSON", "line_number": 239, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 241, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 249, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 251, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 252, "usage_type": "call"}, {"api_name": "piccolo.columns.Varchar", "line_number": 253, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 255, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 262, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 263, "usage_type": "call"}, 
{"api_name": "piccolo.columns.Varchar", "line_number": 264, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 266, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 276, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 277, "usage_type": "call"}, {"api_name": "piccolo.columns.Varchar", "line_number": 278, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 280, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 288, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 289, "usage_type": "call"}, {"api_name": "piccolo.columns.Varchar", "line_number": 290, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 292, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 300, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 301, "usage_type": "call"}, {"api_name": "piccolo.columns.Varchar", "line_number": 302, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 305, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 311, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 312, "usage_type": "call"}, {"api_name": "piccolo.columns.Varchar", "line_number": 313, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 315, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 316, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 319, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 322, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 323, "usage_type": "call"}, {"api_name": "piccolo.columns.Varchar", "line_number": 324, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 326, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 327, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 330, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 333, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 334, "usage_type": "call"}, {"api_name": "piccolo.columns.Integer", "line_number": 335, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 337, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 338, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 339, "usage_type": "call"}, {"api_name": "piccolo.columns.Integer", "line_number": 340, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 342, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 360, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 362, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 363, "usage_type": "call"}, {"api_name": "piccolo.columns.Integer", "line_number": 364, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 366, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 381, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 382, "usage_type": "call"}, {"api_name": 
"piccolo.columns.Integer", "line_number": 383, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 386, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 397, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 398, "usage_type": "call"}, {"api_name": "piccolo.columns.Integer", "line_number": 399, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 401, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 402, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 403, "usage_type": "call"}, {"api_name": "piccolo.columns.Integer", "line_number": 404, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 406, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 424, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 431, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 432, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 434, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 435, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 436, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 438, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 439, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 440, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 442, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 447, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 455, "usage_type": "attribute"}, {"api_name": "piccolo.table.Table", "line_number": 464, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 465, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 467, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 468, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 469, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 471, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 472, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 473, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 474, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 476, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 477, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 479, "usage_type": "name"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 480, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 481, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 482, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 487, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 490, "usage_type": "attribute"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 502, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", 
"line_number": 507, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 517, "usage_type": "attribute"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 524, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 532, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 540, "usage_type": "attribute"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 556, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 576, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 577, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 579, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 580, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 581, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 583, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 584, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 585, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 587, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 592, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 599, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 605, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 607, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 608, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 610, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 611, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 612, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 614, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 615, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 616, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 617, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 619, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 620, "usage_type": "call"}, {"api_name": "piccolo.table.Table", "line_number": 622, "usage_type": "name"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 623, "usage_type": "call"}, {"api_name": "piccolo.columns.column_types.ForeignKey", "line_number": 624, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 626, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 631, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 634, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 637, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 644, "usage_type": "name"}, {"api_name": "piccolo.table.Table", "line_number": 651, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 652, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 654, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 661, "usage_type": "name"}, {"api_name": 
"piccolo.table.Table", "line_number": 668, "usage_type": "name"}, {"api_name": "piccolo.columns.Varchar", "line_number": 669, "usage_type": "call"}, {"api_name": "piccolo.utils.pydantic.create_pydantic_model", "line_number": 671, "usage_type": "call"}]} +{"seq_id": "25452650716", "text": "import numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom transformers import VectorTransformer\nfrom copy import deepcopy\n\n\nclass LinearModel:\n def __init__(self, scale=True, x_transform=None, y_transform=None, y_inverse_transform=None):\n self.x_transform = x_transform\n self.y_transform = y_transform\n self.y_inverse_transform = y_inverse_transform\n self.scaler = StandardScaler() if scale else None\n self.regressor = LinearRegression(fit_intercept=False)\n \n def fit(self, X, y):\n X_trans = self.x_transform(X) if self.x_transform else X\n X_trans = self.scaler.fit_transform(X_trans) if self.scaler else X_trans\n X_trans = np.append(X_trans, np.ones(X_trans.shape[0]).reshape((-1, 1)), axis=1)\n \n y_trans = self.y_transform(y) if self.y_transform else y\n self.regressor.fit(X_trans, y_trans)\n return self\n \n def predict(self, X):\n X_trans = self.x_transform(X) if self.x_transform else X\n X_trans = self.scaler.transform(X_trans) if self.scaler else X_trans\n X_trans = np.append(X_trans, np.ones(X_trans.shape[0]).reshape((-1, 1)), axis=1)\n \n y_pred_trans = self.regressor.predict(X_trans)\n y_pred = self.y_inverse_transform(y_pred_trans) if self.y_inverse_transform else y_pred_trans\n return y_pred\n\n\nclass MultiDimensionalRegressor:\n def __init__(self, dimensions=np.arange(1, 6), fit_decrease=False, regressor=KNeighborsRegressor, **kwargs):\n self.dimensions = dimensions\n self.regressor = regressor(**kwargs)\n self.fit_decrease = fit_decrease\n \n def fit(self, histories):\n self.regressors = []\n for dim in self.dimensions:\n vec_trans = VectorTransformer(length=dim)\n X, y = vec_trans.transform(histories)\n if self.fit_decrease:\n mask = np.all(np.diff(np.concatenate((X, y.reshape(-1, 1)), axis=1)) < 0, axis=-1)\n X, y = X[mask], y[mask]\n regr = deepcopy(self.regressor)\n regr.fit(X, y)\n self.regressors.append(regr)\n return self\n \n def predict(self, X):\n y = np.zeros(X.shape[0])\n counts = (X != 0).sum(axis=1)\n for (dim, regr) in zip(self.dimensions, self.regressors):\n mask = (counts == dim)\n if mask.sum() == 0:\n continue\n y[mask] = regr.predict(X[mask, -dim:])\n return y", "repo_name": "isadrtdinov/Solar-Activity", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2571, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsRegressor", "line_number": 37, "usage_type": "name"}, {"api_name": "transformers.VectorTransformer", "line_number": 45, "usage_type": "call"}, {"api_name": 
"numpy.all", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 48, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "9080883006", "text": "\"\"\"Utility functions for three-dimensional mobjects.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"get_3d_vmob_gradient_start_and_end_points\",\n \"get_3d_vmob_start_corner_index\",\n \"get_3d_vmob_end_corner_index\",\n \"get_3d_vmob_start_corner\",\n \"get_3d_vmob_end_corner\",\n \"get_3d_vmob_unit_normal\",\n \"get_3d_vmob_start_corner_unit_normal\",\n \"get_3d_vmob_end_corner_unit_normal\",\n]\n\n\nfrom typing import TYPE_CHECKING, Literal\n\nimport numpy as np\n\nfrom manim.constants import ORIGIN, UP\nfrom manim.utils.space_ops import get_unit_normal\n\nif TYPE_CHECKING:\n from manim.typing import Point3D, Vector\n\n\ndef get_3d_vmob_gradient_start_and_end_points(vmob) -> tuple[Point3D, Point3D]:\n return (\n get_3d_vmob_start_corner(vmob),\n get_3d_vmob_end_corner(vmob),\n )\n\n\ndef get_3d_vmob_start_corner_index(vmob) -> Literal[0]:\n return 0\n\n\ndef get_3d_vmob_end_corner_index(vmob) -> int:\n return ((len(vmob.points) - 1) // 6) * 3\n\n\ndef get_3d_vmob_start_corner(vmob) -> Point3D:\n if vmob.get_num_points() == 0:\n return np.array(ORIGIN)\n return vmob.points[get_3d_vmob_start_corner_index(vmob)]\n\n\ndef get_3d_vmob_end_corner(vmob) -> Point3D:\n if vmob.get_num_points() == 0:\n return np.array(ORIGIN)\n return vmob.points[get_3d_vmob_end_corner_index(vmob)]\n\n\ndef get_3d_vmob_unit_normal(vmob, point_index: int) -> Vector:\n n_points = vmob.get_num_points()\n if len(vmob.get_anchors()) <= 2:\n return np.array(UP)\n i = point_index\n im3 = i - 3 if i > 2 else (n_points - 4)\n ip3 = i + 3 if i < (n_points - 3) else 3\n unit_normal = get_unit_normal(\n vmob.points[ip3] - vmob.points[i],\n vmob.points[im3] - vmob.points[i],\n )\n if np.linalg.norm(unit_normal) == 0:\n return np.array(UP)\n return unit_normal\n\n\ndef get_3d_vmob_start_corner_unit_normal(vmob) -> Vector:\n return get_3d_vmob_unit_normal(vmob, get_3d_vmob_start_corner_index(vmob))\n\n\ndef get_3d_vmob_end_corner_unit_normal(vmob) -> Vector:\n return get_3d_vmob_unit_normal(vmob, get_3d_vmob_end_corner_index(vmob))\n", "repo_name": "ManimCommunity/manim", "sub_path": "manim/mobject/three_d/three_d_utils.py", "file_name": "three_d_utils.py", "file_ext": "py", "file_size_in_byte": 2091, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16609, "dataset": "github-code", "pt": "94", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 24, "usage_type": "name"}, {"api_name": "manim.typing.Point3D", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "manim.constants.ORIGIN", "line_number": 45, "usage_type": "argument"}, {"api_name": "manim.typing.Point3D", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "manim.constants.ORIGIN", "line_number": 51, "usage_type": "argument"}, {"api_name": "manim.typing.Point3D", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "manim.constants.UP", "line_number": 58, 
"usage_type": "argument"}, {"api_name": "manim.utils.space_ops.get_unit_normal", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "manim.constants.UP", "line_number": 67, "usage_type": "argument"}, {"api_name": "manim.typing.Vector", "line_number": 55, "usage_type": "name"}, {"api_name": "manim.typing.Vector", "line_number": 71, "usage_type": "name"}, {"api_name": "manim.typing.Vector", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "25781460181", "text": "from __future__ import absolute_import\n\nimport logging\nimport six\n\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.integrations.jira.utils import (\n transform_jira_fields_to_form_fields,\n transform_jira_choices_to_strings,\n)\nfrom sentry.models.integration import Integration\nfrom sentry.rules.actions.base import TicketEventAction\nfrom sentry.shared_integrations.exceptions import IntegrationError\nfrom sentry.utils.http import absolute_uri\nfrom sentry.web.decorators import transaction_start\n\nlogger = logging.getLogger(\"sentry.rules\")\n\n\nclass JiraNotifyServiceForm(forms.Form):\n jira_integration = forms.ChoiceField(choices=(), widget=forms.Select())\n\n def __init__(self, *args, **kwargs):\n integrations = [(i.id, i.name) for i in kwargs.pop(\"integrations\")]\n super(JiraNotifyServiceForm, self).__init__(*args, **kwargs)\n\n if integrations:\n self.fields[\"jira_integration\"].initial = integrations[0][0]\n\n self.fields[\"jira_integration\"].choices = integrations\n self.fields[\"jira_integration\"].widget.choices = self.fields[\"jira_integration\"].choices\n\n\nclass JiraCreateTicketAction(TicketEventAction):\n form_cls = JiraNotifyServiceForm\n label = u\"\"\"Create a Jira issue in {jira_integration} with these \"\"\"\n prompt = \"Create a Jira issue\"\n provider = \"jira\"\n integration_key = \"jira_integration\"\n\n def __init__(self, *args, **kwargs):\n super(JiraCreateTicketAction, self).__init__(*args, **kwargs)\n integration_choices = [(i.id, i.name) for i in self.get_integrations()]\n\n if not self.get_integration_id() and integration_choices:\n self.data[self.integration_key] = integration_choices[0][0]\n\n self.form_fields = {\n \"jira_integration\": {\n \"choices\": integration_choices,\n \"initial\": six.text_type(self.get_integration_id()),\n \"type\": \"choice\",\n \"updatesForm\": True,\n }\n }\n\n dynamic_fields = self.get_dynamic_form_fields()\n if dynamic_fields:\n self.form_fields.update(dynamic_fields)\n\n def render_label(self):\n # Make a copy of data.\n kwargs = transform_jira_choices_to_strings(self.form_fields, self.data)\n\n # Replace with \"removed\" if the integration was uninstalled.\n kwargs.update({\"jira_integration\": self.get_integration_name()})\n\n # Only add values when they exist.\n return self.label.format(**kwargs)\n\n def get_dynamic_form_fields(self):\n \"\"\"\n Either get the dynamic form fields cached on the DB or make an API call\n to Jira to get them for the selected integration. 
If both fail, return `None`.\n\n :return: Django form fields dictionary\n \"\"\"\n if \"dynamic_form_fields\" in self.data:\n return self.data[\"dynamic_form_fields\"]\n\n try:\n integration = self.get_integration()\n except Integration.DoesNotExist:\n pass\n else:\n installation = integration.get_installation(self.project.organization.id)\n if installation:\n try:\n fields = installation.get_create_issue_config_no_params()\n except IntegrationError as e:\n # TODO log when the API call fails.\n logger.info(e)\n else:\n dynamic_form_fields = transform_jira_fields_to_form_fields(fields)\n self.data[\"dynamic_form_fields\"] = dynamic_form_fields\n # TODO should I wipe out the rest of the data?\n return dynamic_form_fields\n return None\n\n def clean(self):\n cleaned_data = super(JiraCreateTicketAction, self).clean()\n\n jira_integration = cleaned_data.get(\"jira_integration\")\n try:\n Integration.objects.get(id=jira_integration)\n except Integration.DoesNotExist:\n raise forms.ValidationError(\n _(\"Jira integration is a required field.\",), code=\"invalid\",\n )\n\n def generate_footer(self, rule_url):\n return u\"This ticket was automatically created by Sentry via [{}|{}]\".format(\n self.rule.label, absolute_uri(rule_url),\n )\n\n @transaction_start(\"JiraCreateTicketAction.after\")\n def after(self, event, state):\n organization = self.project.organization\n integration = self.get_integration()\n installation = integration.get_installation(organization.id)\n\n self.data[\"title\"] = event.title\n self.data[\"description\"] = self.build_description(event, installation)\n\n def create_issue(event, futures):\n \"\"\"Create the Jira ticket for a given event\"\"\"\n\n # HACK to get fixVersion in the correct format\n if self.data.get(\"fixVersions\"):\n if not isinstance(self.data[\"fixVersions\"], list):\n self.data[\"fixVersions\"] = [self.data[\"fixVersions\"]]\n\n if self.data.get(\"dynamic_form_fields\"):\n del self.data[\"dynamic_form_fields\"]\n\n if not self.has_linked_issue(event, integration):\n resp = installation.create_issue(self.data)\n self.create_link(resp[\"key\"], integration, installation, event)\n else:\n logger.info(\n \"jira.rule_trigger.link_already_exists\",\n extra={\n \"rule_id\": self.rule.id,\n \"project_id\": self.project.id,\n \"group_id\": event.group.id,\n },\n )\n return\n\n key = u\"jira:{}\".format(integration.id)\n yield self.future(create_issue, key=key)\n", "repo_name": "caseyduquettesc/sentry", "sub_path": "src/sentry/integrations/jira/notify_action.py", "file_name": "notify_action.py", "file_ext": "py", "file_size_in_byte": 5780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "94", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 23, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 23, "usage_type": "call"}, {"api_name": "sentry.rules.actions.base.TicketEventAction", "line_number": 36, "usage_type": "name"}, {"api_name": "six.text_type", "line_number": 53, "usage_type": "call"}, {"api_name": "sentry.integrations.jira.utils.transform_jira_choices_to_strings", "line_number": 65, "usage_type": "call"}, {"api_name": "sentry.models.integration.Integration.DoesNotExist", 
"line_number": 85, "usage_type": "attribute"}, {"api_name": "sentry.models.integration.Integration", "line_number": 85, "usage_type": "name"}, {"api_name": "sentry.shared_integrations.exceptions.IntegrationError", "line_number": 92, "usage_type": "name"}, {"api_name": "sentry.integrations.jira.utils.transform_jira_fields_to_form_fields", "line_number": 96, "usage_type": "call"}, {"api_name": "sentry.models.integration.Integration.objects.get", "line_number": 107, "usage_type": "call"}, {"api_name": "sentry.models.integration.Integration.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "sentry.models.integration.Integration", "line_number": 107, "usage_type": "name"}, {"api_name": "sentry.models.integration.Integration.DoesNotExist", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sentry.models.integration.Integration", "line_number": 108, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 109, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 109, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 110, "usage_type": "call"}, {"api_name": "sentry.utils.http.absolute_uri", "line_number": 115, "usage_type": "call"}, {"api_name": "sentry.web.decorators.transaction_start", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "7389155458", "text": "import cv2\n\n#getting the name of image\nfile_name=input(\"Enter the name of the image with extension \\t\")\n#reading the image\n\nsrc=cv2.imread(file_name,cv2.IMREAD_UNCHANGED)\n\n#specify the size of the image\n\n\nscale_percent=int(input(\"enter the size of image you want \\t\"))\n\nnew_width=int(src.shape[1]*scale_percent/100)\nnew_height=int(src.shape[0]*scale_percent/100)\n\noutput=cv2.resize(src,(new_width,new_height))\nsave_as=input(\"Enter the name along with extension you want to save the image \\t\")\ncv2.imwrite(save_as,output)\ncv2.waitKey(0)\n\n#The images should be present in the same directory where this program is present", "repo_name": "Ajay-v44/python_Projects", "sub_path": "Image Resizer/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "22403463192", "text": "# -*- coding: utf-8 -*-\n#\n# purpose: Create 'archives' of eStation2 products to be disseminated through eumetcast\n# date: 27.07.2015\n\nfrom __future__ import absolute_import\n\nimport shutil\nimport os\nfrom lib.python import es_logging as log\nfrom lib.python import functions\nfrom database import querydb\nfrom config import es_constants\nfrom apps.productmanagement.datasets import Dataset\n\nlogger = log.my_logger(__name__)\n\ndef create_archive_eumetcast(product, version, subproducts, mapset, start_date=None, end_date=None, target_dir=None, overwrite=False, tgz=False):\n\n # Rename and copy to target dir (/data/archives by default) the eStation2 files\n\n # Check target_dir\n if target_dir is None:\n target_dir = es_constants.es2globals['archive_dir']\n\n # Loop over subproducts\n if not isinstance(subproducts,list):\n 
subproducts_list=[]\n subproducts_list.append(subproducts)\n else:\n subproducts_list=subproducts\n\n for subproduct in subproducts_list:\n\n # Identify all existing files\n # Check if dates have to be disregarded (i.e. get all files)\n if start_date==-1 or end_date==-1:\n my_dataset=Dataset(product, subproduct, mapset, version=version)\n filenames = my_dataset.get_filenames()\n else:\n my_dataset=Dataset(product, subproduct, mapset, version=version, from_date=start_date, to_date=end_date)\n filenames = my_dataset.get_filenames_range()\n filenames.sort()\n for filename in filenames:\n # Derive MESA_JRC_ filename\n archive_name=functions.convert_name_to_eumetcast(filename, tgz=tgz)\n # Check if the target_file already exist\n if not os.path.isfile(target_dir+os.path.sep+archive_name) or overwrite:\n\n target_file=target_dir+os.path.sep+archive_name\n if not tgz:\n # Copy only to target_dir\n status=shutil.copyfile(filename,target_file)\n else:\n command='tar -cvzf '+target_file+' -C '+os.path.dirname(filename)+' '+os.path.basename(filename)\n status=os.system(command)\n\n logger.info(\"Files copied for product [%s]/version [%s]/subproducts [%s]/mapset [%s]\" %(product, version, subproduct, mapset))\n", "repo_name": "eStation2/estation2.MESA", "sub_path": "src/apps/productmanagement/create_archive.py", "file_name": "create_archive.py", "file_ext": "py", "file_size_in_byte": 2304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "97", "api": [{"api_name": "lib.python.es_logging.my_logger", "line_number": 16, "usage_type": "call"}, {"api_name": "lib.python.es_logging", "line_number": 16, "usage_type": "name"}, {"api_name": "config.es_constants.es2globals", "line_number": 24, "usage_type": "attribute"}, {"api_name": "config.es_constants", "line_number": 24, "usage_type": "name"}, {"api_name": "apps.productmanagement.datasets.Dataset", "line_number": 38, "usage_type": "call"}, {"api_name": "apps.productmanagement.datasets.Dataset", "line_number": 41, "usage_type": "call"}, {"api_name": "lib.python.functions.convert_name_to_eumetcast", "line_number": 46, "usage_type": "call"}, {"api_name": "lib.python.functions", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 55, "usage_type": "call"}, {"api_name": "os.system", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "11296243266", "text": "import cv2\nimport torch\nimport os\nimport numpy as np\n\nimport albumentations as A\n\nfrom albumentations.pytorch.transforms import ToTensorV2\nfrom torch.utils.data import Dataset\nfrom pycocotools.coco import COCO\n\nclass COCODTrain(Dataset):\n def __init__(self, annotations, data_dir, transform):\n\n super().__init__()\n self.data_dir = data_dir\n self.annotations = COCO(annotations)\n self.image_ids = self.annotations.getImgIds()\n self.transform = transform\n\n def __getitem__(self, index: int) -> None:\n\n # get image id\n image_id = self.image_ids[index]\n \n # load image info\n image_info = self.annotations.loadImgs(image_id)[0]\n \n # load raw image\n image = 
cv2.imread(os.path.join(self.data_dir, image_info['file_name']))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n image /= 255.0\n \n # get annotation ids\n ann_ids = self.annotations.getAnnIds(imgIds=image_info['id'])\n\n # load annotations info\n anns = self.annotations.loadAnns(ann_ids)\n \n # get bounding box info from annotation\n bboxes = np.array([x['bbox'] for x in anns])\n\n # convert (x1, y1, w, h) to (x1, y1, x2, y2)\n bboxes[:, 2] = bboxes[:, 0] + bboxes[:, 2]\n bboxes[:, 3] = bboxes[:, 1] + bboxes[:, 3]\n bboxes = torch.tensor(bboxes, dtype=torch.float32)\n\n # get labels\n category_ids = np.array([x['category_id'] for x in anns])\n category_ids = torch.as_tensor(category_ids, dtype=torch.int64)\n\n # transforms\n if not self.transform: \n self.transform = A.Compose([\n ToTensorV2(p=1.0),\n ], bbox_params=A.BboxParams(format='pascal_voc', label_fields=['category_ids']))\n\n transformed = self.transform(image=image, bboxes=bboxes, category_ids=category_ids)\n\n return transformed['image'], transformed['bboxes'], transformed['category_ids']\n\n def __len__(self) -> int:\n return len(self.image_ids)\n\nclass COCOTest(COCODTrain):\n def __init__(self, annotations, data_dir, transform):\n super().__init__(annotations, data_dir, transform)\n\n def __getitem__(self, index: int) -> None:\n # get image id\n image_id = self.image_ids[index]\n\n # load image info\n image_info = self.annotations.loadImgs(image_id)[0]\n\n # load raw image\n image = cv2.imread(os.path.join(self.data_dir, image_info[\"file_name\"]))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n image /= 255.0\n\n if not self.transform:\n self.transform = A.Compose([\n ToTensorV2(p=1.0),\n ])\n\n transformed = self.transform(image=image)\n return transformed['image']\n \n \n\n", "repo_name": "hyeonjini/faster-rcnn", "sub_path": "dataset/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 2847, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 12, "usage_type": "name"}, {"api_name": "pycocotools.coco.COCO", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 50, "usage_type": "attribute"}, {"api_name": "albumentations.Compose", "line_number": 54, "usage_type": "call"}, {"api_name": "albumentations.pytorch.transforms.ToTensorV2", "line_number": 55, "usage_type": "call"}, {"api_name": "albumentations.BboxParams", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "albumentations.Compose", "line_number": 82, "usage_type": "call"}, {"api_name": "albumentations.pytorch.transforms.ToTensorV2", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "38840619236", "text": "from tkinter import *\nfrom tkinter import ttk, simpledialog\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk, ImageFilter\n\n\nclass ImageViewerApp:\n def __init__(self, root):\n self.root = root\n self.root.geometry('1920x1080')\n self.root.title(\"Image Viewer\")\n\n self.filters_mapping = {\n \"BLUR\": ImageFilter.BLUR,\n \"CONTOUR\": ImageFilter.CONTOUR,\n \"EMBOSS\": ImageFilter.EMBOSS,\n \"SMOOTH\": ImageFilter.SMOOTH_MORE,\n }\n\n self.frame_button = Frame(root)\n self.frame_button.pack(side=TOP, fill=X)\n\n self.load_button = ttk.Button(self.frame_button, text=\"Load Image\", command=self.load_image)\n self.load_button.pack(side=LEFT)\n\n self.rotate_button = ttk.Button(self.frame_button, text=\"Rotate CCW\", command=lambda: self.rotate_image('+'))\n self.rotate_button.pack(side=LEFT)\n\n self.rotate_button = ttk.Button(self.frame_button, text=\"Rotate CW\", command=lambda: self.rotate_image('-'))\n self.rotate_button.pack(side=LEFT)\n\n self.blur_button = ttk.Button(self.frame_button, text=\"Blur\", command=lambda: self.filters(\"BLUR\"))\n self.blur_button.pack(side=LEFT)\n\n self.contour_button = ttk.Button(self.frame_button, text=\"Contour\", command=lambda: self.filters(\"CONTOUR\"))\n self.contour_button.pack(side=LEFT)\n\n self.emboss_button = ttk.Button(self.frame_button, text=\"Emboss\", command=lambda: self.filters(\"EMBOSS\"))\n self.emboss_button.pack(side=LEFT)\n\n self.smooth_button = ttk.Button(self.frame_button, text=\"Smooth\", command=lambda: self.filters(\"SMOOTH\"))\n self.smooth_button.pack(side=LEFT)\n\n self.flip_button = ttk.Button(self.frame_button, text=\"Flip\", command=self.flip_image)\n self.flip_button.pack(side=LEFT)\n\n self.resize_button = ttk.Button(self.frame_button, text=\"Resize\", command=self.prompt_for_resize)\n self.resize_button.pack(side=LEFT)\n\n self.reset_button = ttk.Button(self.frame_button, text=\"Reset\", command=self.reset)\n self.reset_button.pack(side=LEFT)\n\n self.undo_button = ttk.Button(self.frame_button, text=\"Undo\", command=self.undo)\n self.undo_button.pack(side=LEFT)\n\n self.save_button = ttk.Button(self.frame_button, text=\"Save\", command=self.save_image)\n self.save_button.pack(side=LEFT)\n\n self.info_label = Label(root, text=\"Image Info:\")\n self.info_label.pack()\n\n self.image_label = Label(root)\n self.image_label.pack()\n\n self.image_history = []\n self.current_image_index = -1\n\n self.image = None\n self.photo = None\n self.displayed_image = None\n self.image_width = 0\n self.image_height = 0\n\n def load_image(self):\n file_path = filedialog.askopenfilename(filetypes=[(\"Image files\", \"*.jpg *.jpeg *.png *.gif *.bmp\")])\n if file_path:\n self.display_image(file_path)\n self.display_image_info(file_path)\n\n def display_image(self, file_path):\n if self.current_image_index < len(self.image_history) - 1:\n self.image_history = self.image_history[:self.current_image_index + 1]\n\n self.image = Image.open(file_path)\n self.image.thumbnail((1000, 1000))\n\n self.photo = ImageTk.PhotoImage(self.image)\n 
self.image_label.config(image=self.photo)\n self.image_label.image = self.photo\n\n self.displayed_image = self.image\n self.image_width, self.image_height = self.displayed_image.size\n\n self.image_history.append(self.displayed_image)\n self.current_image_index += 1\n\n def display_image_info(self, file_path):\n self.image = Image.open(file_path)\n info = (f\"Name: {self.image.filename}\\n\"\n f\"Size: {self.image.size}\\n\"\n f\"Format: {self.image.format}\\n\"\n f\"Mode: {self.image.mode}\")\n self.info_label.config(text=info)\n\n def rotate_image(self, direction):\n if self.displayed_image:\n angle = f\"{direction}90\"\n self.displayed_image = self.displayed_image.rotate(int(angle), expand=True)\n self.photo = ImageTk.PhotoImage(self.displayed_image)\n self.image_label.config(image=self.photo)\n self.image_width, self.image_height = self.displayed_image.size\n\n self.image_label.config(width=self.image_width, height=self.image_height)\n\n self.image_history.append(self.displayed_image)\n self.current_image_index += 1\n\n def filters(self, filter_name):\n if self.displayed_image:\n image_filter = self.filters_mapping[filter_name]\n filtered_image = self.displayed_image.filter(image_filter)\n\n self.image_history.append(filtered_image)\n self.current_image_index += 1\n\n self.photo = ImageTk.PhotoImage(filtered_image)\n\n self.image_label.config(image=self.photo)\n\n self.image_width, self.image_height = filtered_image.size\n self.image_label.config(width=self.image_width, height=self.image_height)\n\n def reset(self):\n if self.image_history:\n self.displayed_image = self.image_history[0]\n self.photo = ImageTk.PhotoImage(self.displayed_image)\n self.image_label.config(image=self.photo)\n self.image_width, self.image_height = self.displayed_image.size\n self.image_label.config(width=self.image_width, height=self.image_height)\n self.current_image_index = 0\n\n def undo(self):\n if self.current_image_index > 0:\n self.current_image_index -= 1\n previous_image = self.image_history[self.current_image_index]\n self.displayed_image = previous_image\n\n self.photo = ImageTk.PhotoImage(previous_image)\n self.image_label.config(image=self.photo)\n\n self.image_width, self.image_height = previous_image.size\n self.image_label.config(width=self.image_width, height=self.image_height)\n\n def flip_image(self):\n if self.displayed_image:\n self.displayed_image = self.displayed_image.transpose(Image.FLIP_LEFT_RIGHT)\n # for transpose(Image.FLIP_TOP_BOTTOM) can use rotate button and flip\n self.photo = ImageTk.PhotoImage(self.displayed_image)\n self.image_label.config(image=self.photo)\n self.image_width, self.image_height = self.displayed_image.size\n\n self.image_label.config(width=self.image_width, height=self.image_height)\n\n self.image_history.append(self.displayed_image)\n self.current_image_index += 1\n\n def resize_image(self, width, height):\n if self.displayed_image:\n self.displayed_image = self.displayed_image.resize((width, height))\n\n self.photo = ImageTk.PhotoImage(self.displayed_image)\n self.image_label.config(image=self.photo)\n self.image_width, self.image_height = self.displayed_image.size\n\n self.image_label.config(width=self.image_width, height=self.image_height)\n\n self.image_history.append(self.displayed_image)\n self.current_image_index += 1\n\n def prompt_for_resize(self):\n root = Tk()\n root.withdraw()\n width = simpledialog.askinteger(\"Resize Image\", \"Enter width:\")\n height = simpledialog.askinteger(\"Resize Image\", \"Enter height:\")\n\n if width is not None and 
height is not None:\n self.resize_image(width, height)\n else:\n return None\n\n def save_image(self):\n root = Tk()\n root.withdraw()\n file_path = filedialog.asksaveasfilename(defaultextension=\".jpg\",\n filetypes=[(\"JPEG files\", \"*.jpg\"), (\"All files\", \"*.*\")])\n\n if file_path:\n self.displayed_image.save(file_path)\n\n\nif __name__ == \"__main__\":\n main_root = Tk()\n app = ImageViewerApp(main_root)\n main_root.mainloop()\n", "repo_name": "ostrev/tkinter_tasks", "sub_path": "image_viewer.py", "file_name": "image_viewer.py", "file_ext": "py", "file_size_in_byte": 8012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "PIL.ImageFilter.BLUR", "line_number": 14, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 14, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.CONTOUR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 15, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.EMBOSS", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 16, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.SMOOTH_MORE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 17, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 23, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 23, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 26, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 29, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 29, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 32, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 32, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 35, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 35, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 38, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 41, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 44, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 47, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 50, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 50, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 53, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 56, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 75, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 75, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 84, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 84, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 87, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 87, 
"usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 98, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 98, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 109, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 109, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 126, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 126, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 136, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 136, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 148, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 148, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 156, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 156, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 158, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 158, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 171, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 171, "usage_type": "name"}, {"api_name": "tkinter.simpledialog.askinteger", "line_number": 183, "usage_type": "call"}, {"api_name": "tkinter.simpledialog", "line_number": 183, "usage_type": "name"}, {"api_name": "tkinter.simpledialog.askinteger", "line_number": 184, "usage_type": "call"}, {"api_name": "tkinter.simpledialog", "line_number": 184, "usage_type": "name"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 194, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 194, "usage_type": "name"}]} +{"seq_id": "28766628418", "text": "import pulumi\nimport pulumi_aws\n\nfrom lambda_util import create_python_lambda\n\nLAMBDA_SOURCE = \"lambda.py\"\nLAMBDA_PACKAGE = \"lambda\"\nLAMBDA_VERSION = \"1.0.0\"\n\n# Provision Lambda function which will be invoked upon an http request.\nlambda_function = create_python_lambda(LAMBDA_PACKAGE, LAMBDA_SOURCE, LAMBDA_VERSION)\n\n# Give API Gateway permissions to invoke the Lambda\nlambda_permission = pulumi_aws.lambda_.Permission(\n \"lambdaPermission\",\n action=\"lambda:InvokeFunction\",\n principal=\"apigateway.amazonaws.com\",\n function=lambda_function,\n)\n\n# Set up the API Gateway\napigw = pulumi_aws.apigatewayv2.Api(\n \"httpApiGateway\",\n protocol_type=\"HTTP\",\n route_key=\"GET /\",\n target=lambda_function.invoke_arn,\n)\n\n# Export the API endpoint for easy access\npulumi.export(\"url\", apigw.api_endpoint)\npulumi.export(\"invoke_arn\", lambda_function.name)\n", "repo_name": "MichaelStott/tech-squawks", "sub_path": "code/homepage/py/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "lambda_util.create_python_lambda", "line_number": 11, "usage_type": "call"}, {"api_name": "pulumi_aws.lambda_.Permission", "line_number": 14, "usage_type": "call"}, {"api_name": "pulumi_aws.lambda_", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pulumi_aws.apigatewayv2.Api", "line_number": 22, "usage_type": "call"}, {"api_name": "pulumi_aws.apigatewayv2", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pulumi.export", "line_number": 30, "usage_type": "call"}, {"api_name": "pulumi.export", "line_number": 31, "usage_type": "call"}]} 
+{"seq_id": "7874030696", "text": "# Escreva um códgio que leia o ano de nascimento de 7 pessoas e informe quantas delas são maiores de idade,\n\n\nfrom datetime import date\n\nmaiores_idade = 0\nmenores_idade = 0\n\nfor i in range(1, 8):\n ano_nascimento = int(input(f'Informe o ano de nascimento da {i} pessoas: '))\n if date.today().year - ano_nascimento > 18:\n maiores_idade += 1\n else:\n menores_idade += 1\n\nprint(f'São {maiores_idade} pessoas maiores de idade e {menores_idade} menores de idade.')", "repo_name": "MarianeFTRosario/Python-Curso_em_video", "sub_path": "Python_Mundo_2/Exercicio_17.py", "file_name": "Exercicio_17.py", "file_ext": "py", "file_size_in_byte": 483, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "datetime.date.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "7353219800", "text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\n# from itemadapter import ItemAdapter\nfrom twisted.enterprise import adbapi\nimport pymysql #pymysql是一个流行的Python模块,专门用于与MySQL数据库进行交互\n\n\nclass DoubanbooksPipeline:\n def __init__(self):\n connection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='doubanbook',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor()\n cursor.execute('DROP TABLE IF EXISTS doubanbooks')\n cursor.execute('''create table doubanbooks (\n \t name varchar (300),\n \t author varchar (300),\n \t press varchar (300),\n date varchar (90),\n page varchar (90),\n price varchar (90),\n score varchar (90),\n rating_people varchar (33),\n ISBN varchar (90),\n subject_id varchar (33),\n \t tags varchar (2400));''')\n\n def process_item(self, item, spider):\n connection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='doubanbook',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor()\n if item[\"page\"] is None:\n item[\"page\"] = 'null'\n cursor.execute(\"select name from doubanbooks where subject_id = %s limit 1\", (item[\"subject_id\"]))\n result = cursor.fetchone()\n if result is None:\n print(\"inserting \", item[\"name\"])\n cursor.execute(\n \"insert into doubanbooks (name, author, press, date, page, price, score,rating_people, ISBN,subject_id,tags) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s)\",\n (item[\"name\"], item[\"author\"], item[\"press\"], item[\"date\"], item[\"page\"], item[\"price\"], item[\"score\"],\n item['rating_people'], item[\"ISBN\"], item[\"subject_id\"], item[\"tags\"]))\n print(item[\"name\"], item[\"author\"], item[\"press\"], item[\"date\"], item[\"page\"], item[\"price\"], item[\"score\"],\n item['rating_people'], item[\"ISBN\"], item[\"subject_id\"], item[\"tags\"])\n connection.commit()\n return item\n", "repo_name": "Titiuu/Scrapy_doubanbooks", "sub_path": "DoubanBooks/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 2940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pymysql.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 20, "usage_type": "attribute"}, {"api_name": 
"pymysql.connect", "line_number": 37, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "73765765750", "text": "import pandas as pd\nimport numpy as np\nimport datetime as dt\nimport geopandas as gpd\nfrom shapely.geometry import Point, LineString,Polygon\nimport matplotlib.pyplot as plt\nimport os\nimport folium\nattr='(c) OpenStreetMap contributors (c) CartoDB, CartoDB attributions'\nimport time\nfrom pyproj import Proj, CRS,transform\n\n#e4326=Proj(init='epsg:4326')\ne4326=CRS('EPSG:4326')\n#e3857=Proj(init='epsg:3857')\ne3857=CRS('EPSG:3857')\n\nclass Projection:\n '''\n helper to project lat/lon values to map\n '''\n #e4326=Proj(init='epsg:4326')\n e4326=CRS('EPSG:4326')\n #e3857=Proj(init='epsg:3857')\n e3857=CRS('EPSG:3857')\n\n @staticmethod\n def wgsToXy(lon,lat):\n t1=transform(Projection.e4326,Projection.e3857, lon,lat)\n #t2=transform(Proj('epsg:4326'), Proj('epsg:3857'), lon,lat)\n return t1\n\n @staticmethod\n def pointToXy(point):\n xy=point.split(\",\")\n return Projection.wgsToXy(float(xy[0]),float(xy[1]))\n\ncity=\"\"\npickle_files_loc = \"/root/pickles/\"+city+\"/\"\nhtmls_loc = \"/root/htmls/\"+city+\"/\"\npngs_loc = \"/root/pngs/\"+city+\"/\"\nulbr = [0,0,0,0]\ncenter = (0,0)\ntime_delta = 7\nzoom_start = 12\n\n# define a function to get important information from HERE Maps API output\ndef get_resume(data):\n data=data['RWS'][0]\n rws=data['RW']\n fis=[x['FIS'][0]['FI'] for x in rws]\n fisis = [direction for subfis in fis for direction in subfis]\n JFS = [x['CF'][0]['JF'] for x in fisis]\n IDS = [str(x['TMC']['PC'])+x['TMC']['QD'] for x in fisis]\n LIS = [x['LI'] for x in rws]\n SHPS = [x['SHP'] for x in fisis]\n FCS = [np.mean([x['FC'] for x in y]) for y in SHPS]\n SHPS_string=[\"\".join(list(np.array([x['value'] for x in shp]).flatten())) for shp in SHPS]\n df = pd.DataFrame({'JF':JFS,\n #'SU':SUS,\n 'FC':FCS,'ID':IDS,'shape':SHPS_string})\n return df\n\n# define a function to plot lines on folium and save it as HTML\ndef plot_folium(tes, fname, center, zoom_start, save=True):\n m=folium.Map(center, zoom_start=zoom_start,\n tiles=\"https://cartodb-basemaps-{s}.global.ssl.fastly.net/dark_nolabels/{z}/{x}/{y}.png\",\n #tiles = \"https://{s}.basemaps.cartocdn.com/base-dark/{z}/{x}/{y}.png\",\n attr=attr)\n color={2:'#069E2D',0:'#034732',1:'#008148',3:'#F5BB00',4:'#FB5012'}\n style_function = lambda x: {\n 'color' : color[x['properties']['group']],\n 'weight' : 5/x['properties']['FC']\n }\n tes_=tes\n for wi in np.sort(tes['FC'].unique())[::-1]:\n for c in np.sort(tes['group'].unique()):\n the_df=tes_[(tes_['FC']==wi)&(tes_['group']==c)]\n if len(the_df)==0:\n continue\n else:\n folium.GeoJson(the_df, style_function=style_function).add_to(m)\n if save==True:\n m.save(fname)\n else:\n return m\n\ndef process_to_html(fname1, time_delta, pickles_loc, htmls_loc, center, zoom_start):\n time1 = dt.datetime.strptime(fname1.split(\".\")[0], \"%Y-%m-%d %H-%M-%S\") + dt.timedelta(hours=time_delta)\n data1 = pd.read_pickle(os.path.join(pickles_loc, fname1))\n res1 = get_resume(data1)\n res1['group'] = pd.cut(res1.JF, [-1, 1, 3, 4, 8, 11]).cat.codes\n tes = pd.merge(gdf_ok, res1[['ID', 'JF', 'group']], on=\"ID\")\n plot_folium(tes, os.path.join(htmls_loc, str(time1).replace(\":\", \"-\") + \".html\"), center, zoom_start)\n return tes\n\ncities = [\"bandung\", \"malang\", \"jakarta\", \"singapore\", \"surabaya\", \"yogyakarta\"]\nulbrs = [[-7.058949,107.358087,-6.809475,107.852472], \n 
[-8.044023,112.510911,-7.909893,112.758103],\n [-6.281836,106.585229,-6.070211,107.071031],\n [1.239993,103.563487,1.473044,104.054782],\n [-7.353130,112.493300,-7.141971,112.979102],\n [-7.859463,110.241654,-7.743984,110.487302]]\ncenters = [(-6.934228,107.605280),\n (-7.976964,112.633906),\n (-6.176034,106.828130),\n (1.357061,103.811593),\n (-7.246360,112.737753),\n (-7.801727,110.364478)]\n\nroot_folder = \"..\"\nremove_pickles = True\ni = 0\nfor kota in cities:\n print(\"process \"+kota)\n city = kota\n pickle_files_loc = root_folder+\"/pickles/\"+city+\"/\"\n htmls_loc = root_folder+\"/htmls/\"+city+\"/\"\n pngs_loc = root_folder+\"/pngs/\"+city+\"/\"\n ulbr = ulbrs[i]\n center = centers[i]\n i += 1\n\n files = os.listdir(pickle_files_loc)\n\n pickle_count = 0\n begin_time = dt.datetime.now()\n for file in files:\n fname1 = file\n print(fname1)\n if(len(fname1)) > 19:\n time1 = dt.datetime.strptime(fname1.split(\".\")[0], \"%Y-%m-%d %H-%M-%S\") + dt.timedelta(hours=time_delta) # +14 because I live in PST\n data1 = pd.read_pickle(os.path.join(pickle_files_loc, fname1))\n res1 = get_resume(data1)\n gdf = res1[['ID', 'shape', 'FC']].copy()\n gdf['geometry'] = gdf['shape'].apply(lambda s: LineString(map(lambda x: Point(eval(x)[::-1]),s.strip().split(\" \"))))\n gdf = gpd.GeoDataFrame(gdf)\n gdf.crs = {'init': 'epsg:4326'}\n gdf.head()\n res1['group'] = pd.cut(res1.JF, [-1, 1, 3, 4, 8, 11]).cat.codes\n u, l = ulbr[0], ulbr[1]\n b, r = ulbr[2], ulbr[3],\n x = [l, r, r, l]\n y = [b, b, u, u]\n bound = Polygon([[px, py] for px, py in zip(x, y)])\n bd = gpd.GeoDataFrame(pd.Series([bound]).reset_index().rename(columns={0: 'geometry'}))\n bd.crs = {'init': 'epsg:4326'}\n gdf_ok = gpd.sjoin(gdf, bd)\n process_to_html(fname1, time_delta, pickle_files_loc, htmls_loc, center, zoom_start)\n pickle_count += 1\n if os.path.exists(pickle_files_loc+fname1) and remove_pickles == True:\n os.remove(pickle_files_loc+fname1)\n print(pickle_files_loc+fname1+\" deleted\")\n else:\n print(pickle_files_loc+fname1+\" does not exist\")\n print(city+\": \"+str(dt.datetime.now() - begin_time))\n print(city+\" done, \"+str(pickle_count)+\" files executed\")", "repo_name": "basnugroho/trafik", "sub_path": "pickles_to_htmls.py", "file_name": "pickles_to_htmls.py", "file_ext": "py", "file_size_in_byte": 6213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pyproj.CRS", "line_number": 14, "usage_type": "call"}, {"api_name": "pyproj.CRS", "line_number": 16, "usage_type": "call"}, {"api_name": "pyproj.CRS", "line_number": 23, "usage_type": "call"}, {"api_name": "pyproj.CRS", "line_number": 25, "usage_type": "call"}, {"api_name": "pyproj.transform", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 77, "usage_type": "call"}, {"api_name": "folium.GeoJson", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 89, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 89, "usage_type": "call"}, {"api_name": 
"pandas.read_pickle", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pandas.cut", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 124, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 127, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "shapely.geometry.LineString", "line_number": 136, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 136, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 140, "usage_type": "call"}, {"api_name": "shapely.geometry.Polygon", "line_number": 145, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 146, "usage_type": "call"}, {"api_name": "geopandas.sjoin", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 152, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "8925239661", "text": "\nimport json\nimport logging\n\nfrom datastax_cassandra_deploy.node import Node\nfrom datastax_cassandra_deploy.cluster import Cluster\nfrom datastax_cassandra_deploy.opscenter import OpsCenter\nfrom datastax_cassandra_deploy.config import ConfigProfile\nfrom datastax_cassandra_deploy.datacenter import DataCenter\nfrom datastax_cassandra_deploy.repository import Repository\nfrom datastax_cassandra_deploy.credentials import Credentials\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_resource_by_name(name, resources):\n ''' find resource by name\n '''\n for resource in resources.get().get('results', []):\n if resource.get('name') == name:\n return resource\n return {}\n\n\ndef json_dump(output, data):\n ''' dump data into json file\n '''\n json.dump(data, output, sort_keys=True, indent=4)\n\n\nclass Topology():\n\n def __init__(self, deployments):\n \n self._deployments = deployments\n self.opscenter = None\n\n def deploy(self):\n ''' deploy\n '''\n if not self.connect():\n logger.error('Cannot connect to OpsCenters')\n return\n \n self.deploy_credentials()\n self.deploy_config_profiles()\n self.deploy_repositories()\n self.deploy_clusters()\n\n def dump(self, sections, output_path):\n ''' dump sections\n '''\n if not self.connect():\n logger.error('Cannot connect to OpsCenters')\n 
return\n\n with open(output_path, 'w') as output:\n if 'credentials' in sections:\n self.dump_credentials(output)\n \n if 'config-profiles' in sections:\n self.dump_config_profiles(output)\n\n if 'repositories' in sections:\n self.dump_repositories(output)\n\n if 'datacenters' in sections:\n self.dump_datacenters(output)\n\n if 'clusters' in sections:\n self.dump_clusters(output)\n\n def connect(self):\n ''' connect to OpsCenter\n '''\n self.opscenter = OpsCenter(\n hostname = self._deployments.get('opscenter', {}).get('hostname'),\n username = self._deployments.get('opscenter', {}).get('username'),\n password = self._deployments.get('opscenter', {}).get('password')\n )\n if self.opscenter.connect(\n timeout = self._deployments.get('opscenter', {}).get('timeout'), \n attempts = self._deployments.get('opscenter', {}).get('attempts')):\n \n return True\n\n return False \n\n def deploy_credentials(self):\n ''' deploy credentials\n '''\n credentials = Credentials(self.opscenter.url, self.opscenter.session)\n for creds in self._deployments.get('credentials', []):\n created_creds = credentials.add(**creds)\n if created_creds:\n logger.info(created_creds)\n\n def dump_credentials(self, output):\n ''' dump credentials\n '''\n credentials = Credentials(self.opscenter.url, self.opscenter.session)\n json_dump(output, {'credentials': credentials.get().get('results', []) })\n\n def deploy_config_profiles(self):\n ''' deploy config profiles\n '''\n config_profiles = ConfigProfile(self.opscenter.url, self.opscenter.session)\n for conf_profile in self._deployments.get('config-profiles', []):\n created_conf_profile = config_profiles.add(**conf_profile)\n if created_conf_profile:\n logger.info(created_conf_profile)\n\n def dump_config_profiles(self, output):\n ''' dump config profiles\n '''\n config_profiles = ConfigProfile(self.opscenter.url, self.opscenter.session)\n json_dump(output, {'config-profiles': config_profiles.get().get('results', []) })\n\n def deploy_repositories(self):\n ''' deploy repositories\n '''\n repositories = Repository(self.opscenter.url, self.opscenter.session)\n for repo in self._deployments.get('repositories', []):\n created_repo = repositories.add(**repo)\n if created_repo:\n logger.info(created_repo)\n\n def dump_repositories(self, output):\n ''' dump repositories\n '''\n repositories = Repository(self.opscenter.url, self.opscenter.session)\n json_dump(output, {'repositories': repositories.get().get('results', []) })\n\n def deploy_clusters(self):\n ''' deploy clusters\n '''\n clusters = Cluster(self.opscenter.url, self.opscenter.session)\n config_profiles = ConfigProfile(self.opscenter.url, self.opscenter.session)\n repositories = Repository(self.opscenter.url, self.opscenter.session)\n datacenters = DataCenter(self.opscenter.url, self.opscenter.session)\n nodes = Node(self.opscenter.url, self.opscenter.session)\n\n for cluster in self._deployments.get('clusters', []):\n\n cluster_nodes = cluster.pop('nodes', [])\n datacenter = cluster.pop('datacenter')\n \n config = get_resource_by_name( cluster.get('config-profile-id', None), config_profiles)\n cluster['config-profile-id'] = config.get('id', 'Unknown')\n\n repo = get_resource_by_name( cluster.get('repository-id', None), repositories)\n cluster['repository-id'] = repo.get('id', 'Unknown')\n\n created_cluster = clusters.add(**cluster)\n if not created_cluster:\n created_cluster = get_resource_by_name(cluster.get('name'), clusters)\n if not created_cluster:\n logger.error('Cannot create cluster, {}'.format(cluster))\n 
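# the cluster could neither be created nor looked up by name, so skip it and move on\n                    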
continue\n\n created_datacenter = datacenters.add(**{ 'name': datacenter, 'cluster-id': created_cluster['id'] })\n if not created_datacenter:\n created_datacenter = get_resource_by_name(datacenter, datacenters)\n if not created_datacenter:\n logger.error('Cannot create datacenter, name: {}, cluster: {}'.format(datacenter, created_cluster))\n continue\n\n for node in cluster_nodes:\n node['datacenter-id'] = created_datacenter['id']\n created_node = nodes.add(**node)\n if not created_node:\n created_node = get_resource_by_name(node['name'], nodes)\n if not created_node:\n logger.error('Cannot create node, {}'.format(node))\n continue\n \n", "repo_name": "ownport/datastax-cassandra-deploy", "sub_path": "datastax_cassandra_deploy/topology.py", "file_name": "topology.py", "file_ext": "py", "file_size_in_byte": 6611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 28, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.opscenter.OpsCenter", "line_number": 76, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.credentials.Credentials", "line_number": 92, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.credentials.Credentials", "line_number": 101, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.config.ConfigProfile", "line_number": 107, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.config.ConfigProfile", "line_number": 116, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.repository.Repository", "line_number": 122, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.repository.Repository", "line_number": 131, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.cluster.Cluster", "line_number": 137, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.config.ConfigProfile", "line_number": 138, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.repository.Repository", "line_number": 139, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.datacenter.DataCenter", "line_number": 140, "usage_type": "call"}, {"api_name": "datastax_cassandra_deploy.node.Node", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "33902626582", "text": "from typing import List\nfrom aoc2020.utils import Resources\n\n\nclass XMASScanner:\n\n def __init__(self, scan_length: int):\n self._scan_length = scan_length\n self._preceeding_values: List[int] = []\n\n def _validate_value(self, value: int) -> bool:\n if len(self._preceeding_values) < self._scan_length:\n return True\n for s in self._preceeding_values:\n if s <= value and value - s != s and value - s in self._preceeding_values:\n return True\n\n return False\n\n def receive_value(self, value: int) -> bool:\n valid = self._validate_value(value)\n self._preceeding_values.append(value)\n if len(self._preceeding_values) > self._scan_length:\n self._preceeding_values.pop(0)\n\n return valid\n\n\ndef find_first_incorrect_value(values: List[int], scan_length: int) -> int:\n scanner = XMASScanner(scan_length)\n for v in values:\n if not scanner.receive_value(v):\n return v\n\n raise Exception('Could not find any invalid value')\n\n\ndef find_contiguous_sum_to_value(values: List[int], target: int) -> List[int]:\n for i in range(len(values)):\n for j in range(i + 1, len(values)):\n if sum(values[i:j]) == target:\n return values[i:j]\n\n raise 
Exception('Could not find values which sum to target')\n\n\ndef output_first_incorrect_value(values: List[int], scan_length: int) -> None:\n print('Invalid value found: {}'.format(find_first_incorrect_value(values, scan_length)))\n\n\ndef output_contiguous_sum(values: List[int], scan_length: int) -> None:\n target = find_first_incorrect_value(values, scan_length)\n sum_range = find_contiguous_sum_to_value(values, target)\n max_v = max(sum_range)\n min_v = min(sum_range)\n print('Found range with max value {} and min value {}'.format(max_v, min_v))\n print('{} + {} = {}'.format(min_v, max_v, min_v + max_v))\n\n\noutput = {\n 'first_incorrect_value': output_first_incorrect_value,\n 'contiguous_sum': output_contiguous_sum\n}\n\n\ndef main(resources: Resources) -> int:\n values = list(map(int, resources.read_resource(resources.config['numbers_file'])))\n \n output[resources.config['output']](values, resources.config['scan_length'])\n\n return 0\n", "repo_name": "Samour/adventofcode", "sub_path": "aoc2020/prob9/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}, {"api_name": "aoc2020.utils.Resources", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "43167131845", "text": "import logging\nimport math\nfrom typing import List, Optional\n\nimport pandas as pd\nfrom PySide6.QtCore import QModelIndex, Qt\nfrom PySide6.QtWidgets import QApplication, QMainWindow\n\nfrom statsu.actions.action_base import ActionBase\nfrom statsu.data.data_model import ChangeValuesInRectCommand\nfrom statsu.ui.data_container import Selections\n\nlogger = logging.getLogger(__name__)\n\nclass ActionEdit(ActionBase):\n def undo(self):\n self.main_window.command_manager.undo_command()\n self.main_window.get_current_data_container().model.refresh_layout()\n\n def redo(self):\n self.main_window.command_manager.redo_command()\n self.main_window.get_current_data_container().model.refresh_layout()\n\n def cut_data(self):\n container = self.main_window.get_current_data_container()\n selections = container.get_current_selections()\n\n self.copy_data(selections=selections)\n\n selections.clear_data()\n\n container.model.refresh_layout()\n\n def copy_data(self, *args, selections: Optional[Selections] = None):\n if selections is None:\n selections = self.main_window.get_current_data_container().get_current_selections()\n\n result: List[List[str]] = selections.get_rect_range_data()\n result_text = '\\r\\n'.join(['\\t'.join(item) for item in result])\n\n QApplication.clipboard().setText(result_text)\n\n def paste_data(self):\n container = self.main_window.get_current_data_container()\n min_y, min_x = container.get_current_selections().rect_range.get_top_left()\n\n data_text = QApplication.clipboard().text()\n data = [row.split('\\t') for row in data_text.split('\\r\\n')]\n\n origin = container.raw_data.iloc[min_y:min_y + len(data), min_x:min_x + len(data[0])]\n\n for idx, (_, row) in enumerate(origin.iterrows()):\n for n in range(len(data[idx])):\n if data[idx][n] == '':\n data[idx][n] = row.iloc[n]\n\n 
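# route the paste through the command manager so it is recorded for undo()/redo()\n        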
self.main_window.command_manager.execute_command(\n ChangeValuesInRectCommand(container.raw_data,\n min_y, min_y + len(data),\n min_x, min_x + len(data[0]),\n data,\n desc='Paste values'))\n self.main_window.get_current_data_container().model.refresh_layout()", "repo_name": "ysb06/statsu", "sub_path": "statsu/actions/action_edit.py", "file_name": "action_edit.py", "file_ext": "py", "file_size_in_byte": 2369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "statsu.actions.action_base.ActionBase", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 34, "usage_type": "name"}, {"api_name": "statsu.ui.data_container.Selections", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QApplication.clipboard", "line_number": 41, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QApplication", "line_number": 41, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QApplication.clipboard", "line_number": 47, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QApplication", "line_number": 47, "usage_type": "name"}, {"api_name": "statsu.data.data_model.ChangeValuesInRectCommand", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "26116833240", "text": "#Imports for web scraper\nimport requests\nfrom bs4 import BeautifulSoup\n\n#Python library for connecting to PostgreSQL DB\nimport psycopg2\nfrom psycopg2 import Error\nfrom psycopg2 import sql\n\n#Reading db config from JSON\nimport json\n\nimport sys\n\nmapDayToNum = {\n \"Sunday\": 0,\n \"Monday\": 1,\n \"Tuesday\": 2,\n \"Wednesday\": 3,\n \"Thursday\": 4,\n \"Friday\": 5,\n \"Saturday\": 6\n}\n\n\ndef save_html(html, path):\n with open(path, 'wb') as f:\n f.write(html)\n\n\ndef open_html(path):\n with open(path, 'rb') as f:\n return f.read()\n\n\nurl = 'https://lehigh.sodexomyway.com/dining-near-me/hours'\n\n'Uncomment when making actual request'\nr = requests.get(url)\n\n# print(r.content[:100])\n\n'Save html file locally'\n# save_html(r.content, 'lehighDining.html')\n\n# For testing\n# html = open_html('lehighDining.html')\n\ndiningLocations = {}\n#diningLocations dict structure\n\"\"\"\n{\n \"diningLocation\":\n {\n \n [\n {\"daynum\": 0\n \"starttime\": 2\n \"endtime\": 16},\n ...\n ...\n ]\n }\n ...\n ...\n}\n\"\"\"\n\n# Use r.content for 1st argument when making request\nsoup = BeautifulSoup(r.content, 'html.parser')\n# #NEW CODE\ndiningBlocks = soup.find_all(\"div\", attrs={\"class\": \"dining-block\"})\n\nfor diningBlock in diningBlocks:\n # Name of dining hall\n # print(diningBlock)\n diningTitleBlock = diningBlock.select(\"h3\")\n # print(diningTitleBlock)\n # print()\n diningName = None\n if diningTitleBlock[0].find(\"a\"):\n diningName = diningTitleBlock[0].find(\"a\").string\n else:\n diningName = diningTitleBlock[0].string\n\n # diningName = diningBlock.find(\"h3\").find(\"a\").string\n setRegHours = diningBlock.select(\".reghours\")\n # print(setRegHours)\n # print()\n for regHours in setRegHours:\n # print(regHours)\n # print()\n\n hours = regHours.select(\"div\")\n # print(hours)\n for hour in hours:\n # print(hour)\n setOfDays = hour.select(\"p\")\n # print(setOfDays)\n #If clear div is found\n if not setOfDays:\n continue\n \n regDays = setOfDays[0]\n\n setOfRegDays = regDays[\"data-arrayregdays\"]\n\n # Each set of days (array of 
strings) for certain hours\n            # Need to map Sunday - Saturday as 0 - 6\n            daysSplit = setOfRegDays.split(\",\")\n\n            # The hours for set of days (could be \"Closed\" or there are given hours)\n            setOfHours = setOfDays[-1].string\n\n            for day in daysSplit:\n                daynum = None\n                starttime = None\n                endtime = None\n\n                # print(diningName)\n                # print(setOfDays)\n\n\n                if \"Closed\" in setOfHours:\n                    #put as starttime: 0 and endtime: 0 in database\n                    if diningName not in diningLocations:\n                        diningLocations[diningName] = []\n\n                    diningLocations[diningName].append({\n                        \"daynum\": mapDayToNum[day],\n                        \"starttime\": 0,\n                        \"endtime\": 0\n                    })\n                \n                #if there are hours\n                else:\n                    # Array of times [start, end] > strings\n                    hoursSplit = setOfHours.split(' - ')\n                    # print(hoursSplit)\n                    \n                    start = None\n                    end = None\n\n                    if \"24 Hours\" in hoursSplit:\n                        if diningName not in diningLocations:\n                            diningLocations[diningName] = []\n\n                        diningLocations[diningName].append({\n                            \"daynum\": mapDayToNum[day],\n                            \"starttime\": 0,\n                            \"endtime\": 24\n                        })\n                        continue\n                    else:\n                        start = hoursSplit[0]\n                        end = hoursSplit[1]\n                    \n\n                    # AM or PM for both start and end times\n                    startAMorPM = start[-2:]\n                    # Start time format: X:XX\n                    startTime = start[:-2]\n                    startHourAndMinutes = startTime.split(':')\n\n\n                    endAMorPM = end[-2:]\n                    # End time format: X:XX\n                    endTime = end[:-2]\n                    endHourAndMinutes = endTime.split(':')\n\n                    if (startAMorPM == \"AM\" and endAMorPM == \"PM\"\n                        or startAMorPM == \"PM\" and endAMorPM == \"PM\"):\n                        daynum = mapDayToNum[day]\n                        starttime = int(startHourAndMinutes[0])\n                        starttime += float(startHourAndMinutes[1]) / 60\n\n                        endtime = int(endHourAndMinutes[0])\n                        endtime += float(endHourAndMinutes[1]) / 60\n\n                        #Add 12 if time is PM\n                        if startAMorPM == \"PM\":\n                            starttime += 12\n                        if endAMorPM == \"PM\":\n                            endtime += 12\n\n                        if diningName not in diningLocations:\n                            diningLocations[diningName] = []\n\n                        diningLocations[diningName].append({\n                            \"daynum\": daynum,\n                            \"starttime\": starttime,\n                            \"endtime\": endtime\n                        })\n\n\n                    elif (startAMorPM == \"AM\" and endAMorPM == \"AM\"\n                        or startAMorPM == \"PM\" and endAMorPM == \"AM\"):\n                        daynum = mapDayToNum[day]\n                        starttime = int(startHourAndMinutes[0])\n                        starttime += float(startHourAndMinutes[1]) / 60\n\n                        endtime = 24\n\n                        #Add 12 if starttime is PM\n                        if startAMorPM == \"PM\":\n                            starttime += 12\n\n                        #Hours past midnight belong to the next day; reset back to Sunday after Saturday\n                        daynum2 = daynum + 1\n                        if daynum2 == 7:\n                            daynum2 = 0\n\n                        starttime2 = 0\n\n                        endtime2 = int(endHourAndMinutes[0])\n                        endtime2 += float(endHourAndMinutes[1]) / 60\n\n                        if diningName not in diningLocations:\n                            diningLocations[diningName] = []\n                        \n                        diningLocations[diningName].append({\n                            \"daynum\": daynum,\n                            \"starttime\": starttime,\n                            \"endtime\": endtime\n                        })\n\n                        diningLocations[diningName].append({\n                            \"daynum\": daynum2,\n                            \"starttime\": starttime2,\n                            \"endtime\": endtime2\n                        })\n \nprint()\nprint() \n# print(diningLocations)\n\nmapDiningToLocation = {\n    \"Cort @ Lower UC\": \"Lower University Center\",\n    \"Rathbone Dining Hall\": \"Rathbone Dining Hall\",\n    \"Brodhead Dining Hall\": \"Brodhead House\",\n    \"Baker's Junction\": \"Upper University Center\",\n    \"Upper UC Food Market\": \"Upper University Center\",\n    \"Pandini's\": \"Upper University Center\",\n    \"Global Café\": \"William's Hall (2nd Floor)\",\n    \"Lucy's Café\": \"Linderman Library (Lower level)\",\n    \"Iacocca Café\": \"Iacocca Hall\",\n    \"The Grind @ FML\": \"E.W. 
Fairchild-Martindale Library\",\n \"Hawk's Nest\": \"Hawk's Nest Eatery\",\n \"Common Grounds\": \"Rauch Business Center (2nd Floor)\",\n \"Fud Truk\": \"Near E.W. Fairchild-Martindale Library\",\n \"Market X\": \"Building C (Mountaintop Campus)\",\n \"ASA Packer Dining Room\": \"University Center\"\n}\n\ntry:\n #Read database config from json\n with open('./config.json') as jsonFile:\n data = json.load(jsonFile)\n print(data)\n\n #Open database connection\n connection = psycopg2.connect(user = data['user'],\n password = data['password'],\n host = data['host'],\n port = data['port'],\n database = data['database'])\n\n cursor = connection.cursor()\n\n #Selected school\n schoolName = 'lehigh'\n #Get school_id for school\n cursor.execute(\"\"\"SELECT school_id\n FROM schools WHERE school_name=%s;\"\"\"\n ,(schoolName,))\n \n #Returns array of tuples\n school_idFetch = cursor.fetchall()\n\n school_id = None\n #Check if school is in the database or not\n if school_idFetch:\n school_id = school_idFetch[0][0]\n else:\n #Insert into database if school is not in database\n cursor.execute(\"\"\"INSERT INTO schools(school_name) \n VALUES (%s);\"\"\", (schoolName))\n \n newSchool_idFetch = cursor.fetchall()\n school_id = school_idFetch[0][0]\n \n #Clear schedules to insert new schedule\n cursor.execute(\"\"\"TRUNCATE hours\"\"\")\n # connection.commit()\n\n for diningLocation, schedule in diningLocations.items():\n print(diningLocation)\n\n #Insert dining location if it does not exist already\n cursor.execute((\"\"\"INSERT INTO dining_locations (school_id, dining_name, location_name) VALUES (\n\t (SELECT school_id FROM schools WHERE school_name=%s),\n %s,\n %s)\n ON CONFLICT DO NOTHING\"\"\"),\n (schoolName, diningLocation, mapDiningToLocation[diningLocation])\n )\n\n #Insert new hours into hours table\n for eachSchedule in schedule:\n print(eachSchedule)\n daynum = eachSchedule[\"daynum\"]\n starttime = eachSchedule[\"starttime\"]\n endtime = eachSchedule[\"endtime\"]\n\n print('test1')\n # cursor.execute(\"\"\"(SELECT dining_id FROM dining_locations WHERE dining_name=%s)\"\"\",(diningLocation,))\n # print(cursor.fetchall()[0][0])\n\n #FIX ERROR\n cursor.execute((\"\"\"INSERT INTO hours (school_id, dining_id, daynum, starttime, endtime) \n VALUES (\n (SELECT school_id FROM schools WHERE school_name=%s),\n (SELECT dining_id FROM dining_locations WHERE dining_name=%s),\n %s,\n %s,\n %s)\"\"\"),\n (schoolName, diningLocation, daynum, starttime, endtime)\n )\n print('test2')\n \n connection.commit()\n\nexcept (Exception, psycopg2.Error) as error :\n print (\"Error while connecting to PostgreSQL\", error)\n err_type, err_obj, traceback = sys.exc_info()\n line_num = traceback.tb_lineno\n print(line_num)\nfinally:\n #closing database connection.\n if(connection):\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n\n", "repo_name": "anh212/CollegeEatsBackend", "sub_path": "parser/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 11054, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 70, "usage_type": "call"}, {"api_name": "json.load", "line_number": 258, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 262, "usage_type": "call"}, {"api_name": "psycopg2.Error", "line_number": 333, "usage_type": "attribute"}, {"api_name": "sys.exc_info", 
"line_number": 335, "usage_type": "call"}]} +{"seq_id": "3759288338", "text": "\nimport json\nimport socket\nimport time\n\nimport paramiko\nimport redis\nfrom paramiko.ssh_exception import AuthenticationException, SSHException\nfrom flask_sqlalchemy_fix import SQLAlchemy\nfrom flask import current_app\nfrom werkzeug.contrib.cache import RedisCache\n\nfrom .login import LoginManager\nfrom .settings import (REDIS_HOST, REDIS_PORT,\n SSH_KEY_FILENAME,\n SSE_KEEPALIVE_INTERVAL,\n SSE_POLL_INTERVAL)\n\n\nlogin_manager = LoginManager()\n\ndb = SQLAlchemy()\ncache = RedisCache(host=REDIS_HOST, port=REDIS_PORT)\n\n\nclass AppError(Exception):\n \"\"\"Base application error class.\"\"\"\n def __init__(self, msg):\n self.msg = msg\n\n\nclass ConnectionPool(object):\n pool = {}\n\n @classmethod\n def key(cls, *args, **kwargs):\n return ':'.join(args) + \\\n ':'.join('%s=%s' % (k, v) for k, v in kwargs.items())\n\n @classmethod\n def lookup_pool(cls, *args, **kwargs):\n key = cls.key(*args, **kwargs)\n if key not in cls.pool:\n cls.pool[key] = redis.ConnectionPool(*args, **kwargs)\n return cls.pool[key]\n\n @classmethod\n def get_connection(cls):\n pool = cls.lookup_pool(\n host=current_app.config.get('SSE_REDIS_HOST', '127.0.0.1'),\n port=current_app.config.get('SSE_REDIS_PORT', 6379),\n db=current_app.config.get('SSE_REDIS_DB', 0),\n )\n return redis.StrictRedis(connection_pool=pool)\n\n\nclass ExclusiveLock(object):\n \"\"\"Class for acquire and release named locks. It's implemented via\n Redis backend.\n\n \"\"\"\n #: Prefix for all locks created by this class.\n lock_prefix = 'kd.exclusivelock.'\n\n def __init__(self, name, ttl=None):\n \"\"\"Init Lock object.\n :param name: name of the lock\n :param ttl: number of seconds after acquiring when the lock must\n be automatically released.\n \"\"\"\n self._redis_con = ConnectionPool().get_connection()\n self._lock = None\n self.name = self.lock_prefix + name\n self.ttl = ttl\n\n def lock(self, blocking=False):\n \"\"\"Try to acquire the lock.\n If lock is already acquired, then immediately returns False\n if blocking=False (default). Wait lock release if blockging=True.\n If lock has been acquired, then returns True.\n :param blocking: optional flag specifying whether lock should be\n blocking or not\n \"\"\"\n if self._lock is not None:\n return False\n self._lock = self._redis_con.lock(self.name, self.ttl)\n return self._lock.acquire(blocking=blocking)\n\n def release(self):\n \"\"\"Release the lock.\"\"\"\n if self._lock is None:\n return\n self._lock.release()\n\n @classmethod\n def is_acquired(cls, name):\n \"\"\"Checks if the lock was already acquired and not yet released.\"\"\"\n redis_con = ConnectionPool().get_connection()\n name = cls.lock_prefix + name\n lock = redis_con.lock(name, 1)\n res = False\n try:\n res = not lock.acquire(blocking=False)\n finally:\n try:\n lock.release()\n except redis.lock.LockError:\n # exception is raised in case of already released lock\n pass\n return res\n\n @classmethod\n def clean_locks(cls, pattern=None):\n \"\"\"Removes all locks. 
Optionally may be specified prefix for lock's\n names.\n\n \"\"\"\n redis_con = ConnectionPool().get_connection()\n if pattern:\n pattern = cls.lock_prefix + pattern + '*'\n else:\n pattern = cls.lock_prefix + '*'\n keys = list(redis_con.scan_iter(pattern))\n if keys:\n redis_con.delete(*keys)\n\n\nclass ExclusiveLockContextManager(object):\n\n def __init__(self, name, blocking=False, ttl=None):\n self.blocking = blocking\n self._lock = ExclusiveLock(name, ttl=ttl)\n\n def __enter__(self):\n return self._lock.lock(blocking=self.blocking)\n\n def __exit__(self, *_):\n self._lock.release()\n\n\nclass ServerSentEvents(object):\n\n def __init__(self):\n self._buff = []\n\n @staticmethod\n def _parse_message_text(text, encoding):\n \"\"\"\n Generator to parse and decode data to be sent to SSE endpoint\n @param text: iterable -> list, tuple, set or string to be decoded\n @param encoding: string -> endocing to decode\n @return: generator\n \"\"\"\n if isinstance(text, (list, tuple, set)):\n for item in text:\n if isinstance(item, bytes):\n item = item.decode(encoding)\n for subitem in item.splitlines():\n yield subitem\n else:\n if isinstance(text, bytes):\n text = text.decode(encoding)\n for item in text.splitlines():\n yield item\n\n def make_message(self, eid, event, text, encoding='utf-8'):\n \"\"\"\n Makes message according to SSE standard\n @param eid: int -> message id\n @param event: string -> event type\n @param text: iterable -> message content\n @param encoding: string -> encoding to decode data\n @return: string -> decoded and formatted string data\n \"\"\"\n self._buff.append(\"event:{0}\\n\".format(event))\n for text_item in self._parse_message_text(text, encoding):\n self._buff.append(\"data:{0}\\n\".format(text_item))\n if eid is not None:\n self._buff.append(\"id:{0}\\n\".format(eid))\n self._buff.append('\\n')\n\n def __iter__(self):\n for item in self._buff:\n yield item\n self._buff = []\n\n\nclass EvtStream(object):\n key = 'SSEEVT'\n\n def __init__(self, conn, channel, last_id=None):\n self.conn = conn\n self.channel = channel\n self.pubsub = conn.pubsub()\n self.pubsub.subscribe(channel)\n self.last_id = last_id\n self.timeout = int(SSE_KEEPALIVE_INTERVAL / SSE_POLL_INTERVAL)\n self._time_is_out = self.timeout\n if self.last_id is not None:\n self.last_id = int(self.last_id)\n self.cache_key = ':'.join([self.key, channel])\n\n def __iter__(self):\n sse = ServerSentEvents()\n if self.last_id is not None:\n for key, value in sorted(\n ((int(k), v) for k, v in self.conn.hgetall(\n self.cache_key).iteritems()), key=(lambda x: x[0])):\n if key <= self.last_id:\n continue\n eid, event, data = json.loads(value)\n if not isinstance(data, basestring):\n data = json.dumps(data)\n sse.make_message(eid, event, data)\n for msg in sse:\n yield msg.encode('u8')\n else:\n yield ':\\n\\n'\n while True:\n message = self.pubsub.get_message()\n if message:\n if message['type'] == 'message':\n eid, event, data = json.loads(message['data'])\n if not isinstance(data, basestring):\n data = json.dumps(data)\n sse.make_message(eid, event, data)\n for msg in sse:\n yield msg.encode('u8')\n else:\n if not self._time_is_out:\n self._time_is_out = self.timeout\n yield ':\\n\\n'\n else:\n self._time_is_out -= 1\n time.sleep(SSE_POLL_INTERVAL)\n\n\ndef ssh_connect(host, timeout=10):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n error_message = None\n try:\n ssh.connect(host, username='root', key_filename=SSH_KEY_FILENAME,\n timeout=timeout)\n except 
(AuthenticationException, SSHException) as e:\n error_message =\\\n '{0}.\\nCheck hostname, check that user from which '.format(e) +\\\n 'Kuberdock runs (usually nginx) has ability to login as root on ' \\\n 'this node, and try again'\n except socket.timeout:\n error_message = 'Connection timeout({0} sec). '.format(timeout) +\\\n 'Check hostname and try again'\n except socket.error as e:\n error_message =\\\n '{0} Check hostname, your credentials, and try again'.format(e)\n except IOError as e:\n error_message =\\\n 'ssh_connect: cannot use SSH-key: {0}'.format(e)\n return ssh, error_message\n\n\nclass RemoteManager(object):\n \"\"\"\n Set of helper functions for convenient work with remote hosts.\n \"\"\"\n def __init__(self, host, timeout=10):\n self.raw_ssh, self.errors = ssh_connect(host, timeout)\n if self.errors:\n self.raw_ssh = None\n\n def close(self):\n self.raw_ssh.close()\n\n def exec_command(self, cmd):\n \"\"\"\n Asynchronously execute command and return i, o, e streams\n \"\"\"\n return self.raw_ssh.exec_command(cmd)\n\n def fast_cmd(self, cmd):\n \"\"\"\n Synchronously execute command\n :return: exit status and error string or data string if success\n \"\"\"\n i, o, e = self.raw_ssh.exec_command(cmd)\n exit_status = o.channel.recv_exit_status()\n if exit_status == -1:\n return exit_status,\\\n 'No exit status, maybe connection is closed by remote server'\n if exit_status > 0:\n return exit_status, e.read()\n return exit_status, o.read()\n", "repo_name": "cloudlinux/kuberdock-platform", "sub_path": "kubedock/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 9556, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 128, "dataset": "github-code", "pt": "94", "api": [{"api_name": "login.LoginManager", "line_number": 20, "usage_type": "call"}, {"api_name": "flask_sqlalchemy_fix.SQLAlchemy", "line_number": 22, "usage_type": "call"}, {"api_name": "werkzeug.contrib.cache.RedisCache", "line_number": 23, "usage_type": "call"}, {"api_name": "settings.REDIS_HOST", "line_number": 23, "usage_type": "name"}, {"api_name": "settings.REDIS_PORT", "line_number": 23, "usage_type": "name"}, {"api_name": "redis.ConnectionPool", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.current_app.config.get", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.current_app.config.get", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.current_app.config.get", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 52, "usage_type": "name"}, {"api_name": "redis.StrictRedis", "line_number": 54, "usage_type": "call"}, {"api_name": "redis.lock", "line_number": 107, "usage_type": "attribute"}, {"api_name": "settings.SSE_KEEPALIVE_INTERVAL", "line_number": 197, "usage_type": "name"}, {"api_name": "settings.SSE_POLL_INTERVAL", "line_number": 197, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 211, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 213, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 223, "usage_type": "call"}, {"api_name": 
"json.dumps", "line_number": 225, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 235, "usage_type": "call"}, {"api_name": "settings.SSE_POLL_INTERVAL", "line_number": 235, "usage_type": "argument"}, {"api_name": "paramiko.SSHClient", "line_number": 239, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 240, "usage_type": "call"}, {"api_name": "settings.SSH_KEY_FILENAME", "line_number": 243, "usage_type": "name"}, {"api_name": "paramiko.ssh_exception.AuthenticationException", "line_number": 245, "usage_type": "name"}, {"api_name": "paramiko.ssh_exception.SSHException", "line_number": 245, "usage_type": "name"}, {"api_name": "socket.timeout", "line_number": 250, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 253, "usage_type": "attribute"}]} +{"seq_id": "20500366098", "text": "import glob\nimport os\nimport re\nimport subprocess\nimport sys\n\nimport pandas as pd\nimport numpy as np\nimport yaml\n\n\ndef cifti_parcelate(wb_command, cifti, parcellation):\n \"\"\"\n this wraps the wb_command bash utility to be used in python\n \"\"\"\n parc_name = '.'.join(parcellation.split('/')[-1].split('.')[:-3])\n parcelated_cifti = '.'.join(cifti.split('.')[:-3])+'.'+parc_name+'.pscalar.nii'\n if os.path.isfile(parcelated_cifti):\n # ...\n print('%s exists. skipping parcellation step.' % parcelated_cifti, file=sys.stdout)\n else:\n cmd = [wb_command, '-cifti-parcellate', cifti, parcellation, 'COLUMN', parcelated_cifti]\n subprocess.call(cmd)\n return parcelated_cifti\n\n\ndef cifti_convert_to_text(wb_command, cifti):\n \"\"\"\n this wraps the wb_command bash utility to be used in python\n \"\"\"\n path2txt = '.'.join(cifti.split('.')[:-2])+'.txt'\n if os.path.isfile(path2txt):\n # ...\n print('%s exists. skipping conversion step.' 
% path2txt, file=sys.stdout)\n else:\n cmd = [wb_command, '-cifti-convert', '-to-text', cifti, path2txt]\n subprocess.call(cmd)\n return path2txt\n\n\ndef get_scanid(sub_id, path):\n \"\"\"\n Extracting visit date and creating unique scan_id\n from full path to a subjects directory.\n \"\"\"\n find = sub_id + '/(.+?)-SIEMENS'\n m = re.search(find, path)\n if m:\n visit = m.group(1)\n return sub_id + '_' + visit\n\n\ndef get_scanpaths(sub_path, sub_id, end_file, do='connectivity'):\n \"\"\"\n takes in subjects path, subjects id, and the end file being looked at\n returns a list of full paths to that end file for all visits of that subject.\n \"\"\"\n lookup = {'connectivity': ('analyses_v2', 'timecourses'), 'morphology': ('MNINonLinear', 'fsaverage_LR32k')}\n folder = lookup[do]\n wild_path = os.path.join(sub_path + sub_id, '*', '*', sub_id,\n folder[0], folder[1], end_file)\n return glob.glob(wild_path)\n\n\ndef make_sublist(sub_path):\n \"\"\"\n given a path to a study directory\n make a list of all subjects in that directory\n \"\"\"\n sub_list = []\n for (dirpath, dirnames, filenames) in os.walk(sub_path):\n sub_list.extend(dirnames)\n break\n print(sub_list)\n return sub_list\n\n\ndef make_vectorized_df(path, network, use_regions):\n \"\"\"\n takes in the path to a subjects parcellated timecourse, network assignment, and the regions being used.\n trims the timecourse to only contain regions being used\n computes a correlation matrix of the remaining regions\n returns a vectorized upper triangle of the corr matrix\n \"\"\"\n timecourse = pd.read_csv(path, header=None)\n timecourse = timecourse.T\n trimmed_timecourse = timecourse.loc[use_regions.iloc[:, network], :]\n corr_matrix = trimmed_timecourse.T.corr()\n utri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))\n return utri.stack().reset_index()\n\n\ndef interface(neuro_path, sub_path, out_path, z_threshold, timecourse_csv, network_files,\n morph_target_file=None, parcellation=None, wb_command=None):\n neuro_voxel = pd.read_csv(neuro_path, header=None)\n use_regions = neuro_voxel > z_threshold # Use only the regions that have a z-score > 1.97\n sub_list = make_sublist(sub_path)\n\n for network, save_name in enumerate(network_files):\n print(network)\n\n full_path_0 = get_scanpaths(sub_path, sub_list[0], timecourse_csv)\n if not full_path_0:\n print(sub_list[0], \"glob target for full_path_0 doesn't exist\", file=sys.stderr)\n continue\n\n in_df = make_vectorized_df(full_path_0[0], network, use_regions)\n in_df.columns = ['Region_1', 'Region_2', 'sub_id']\n in_df.drop('sub_id', axis=1, inplace=True) # creating df with use region pairs; loop below will feed into this\n\n if morph_target_file:\n morph_df = pd.DataFrame()\n\n for sub_id in sub_list:\n full_path = get_scanpaths(sub_path, sub_id, timecourse_csv)\n if not full_path:\n print(sub_id, \"glob target for full_path doesn't exist\", file=sys.stderr)\n continue\n\n for path in full_path: # looping through each timepoint\n subject = get_scanid(sub_id, path)\n sub_df = make_vectorized_df(path, network, use_regions)\n sub_df.columns = ['Region_1', 'Region_2', subject]\n\n # Merging each subjects connectivity values into the df created in the previous part\n in_df = pd.merge(in_df, sub_df, 'inner', on=['Region_1', 'Region_2'])\n\n if morph_target_file:\n all_morph_paths = get_scanpaths(sub_path, sub_id, sub_id+'.'+morph_target_file, do='morphology')\n if not all_morph_paths:\n print(sub_id, \"glob target for all_morph_paths doesn't exist\", 
file=sys.stderr)\n continue\n\n for morph_path in all_morph_paths:\n morph_subject = get_scanid(sub_id, morph_path)\n cifti_txt = cifti_convert_to_text(wb_command, cifti_parcelate(wb_command, morph_path, parcellation))\n if not os.path.isfile(cifti_txt):\n print(morph_subject, 'Not able to parcellate dscalar', file=sys.stderr)\n continue\n\n cifti_df = pd.read_table(cifti_txt, header=None)\n\n # Trimming morphology df to only contain use_regions\n morph_sub_df = cifti_df.loc[use_regions.iloc[:, network], :]\n morph_sub_df.columns = [morph_subject]\n\n # Concatenating each subjects connectivity values into main empty\n morph_df = pd.concat([morph_df, morph_sub_df], axis=1) # concatenating each\n # Adding one to region names to account for 0-indexing\n in_df['Region_1'] += 1\n in_df['Region_2'] += 1\n if morph_target_file:\n morph_df.index += 1\n\n # Deleting Cerebellum_left and right, Diencephalon_ventral_left and right and brain stem\n censored_regions = [361, 366, 370, 371, 379]\n for key in censored_regions:\n in_df = in_df[in_df.Region_1 != key]\n in_df = in_df[in_df.Region_2 != key]\n\n # Creating region pair connectivity headers\n reg_p = '[' + in_df['Region_1'].astype(str) + ',' + in_df['Region_2'].astype(str) + ']'\n in_df.drop(['Region_1', 'Region_2'], axis=1, inplace=True)\n in_df.insert(0, 'Region_pairs', reg_p)\n in_df['Region_pairs'].astype(str)\n print(in_df.T)\n\n # Saving it out subjects down rows; region pairs across columns\n in_df.T.to_csv(os.path.join(out_path, save_name), header=False)\n if morph_target_file:\n morph_save_name = '.'.join(save_name.split('.')[:-1])+'_'+morph_target_file.split('.')[0]+'.csv'\n print(morph_save_name)\n print(morph_df.T)\n morph_df.T.to_csv(os.path.join(out_path, morph_save_name))\n\n\ndef cli_interface():\n try:\n yaml_config = sys.argv[1]\n except:\n print(\"usage: {} \".format(sys.argv[0]))\n sys.exit(1)\n with open(yaml_config, 'r') as f:\n args = yaml.load(f)\n interface(**args)\n\n\nif __name__ == '__main__':\n cli_interface()\n", "repo_name": "moosahmed/neurosynth", "sub_path": "extract_connectivity_per_network.py", "file_name": "extract_connectivity_per_network.py", "file_ext": "py", "file_size_in_byte": 7354, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.path.isfile", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 20, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 37, "usage_type": "call"}, {"api_name": "re.search", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 62, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.triu", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 89, "usage_type": "attribute"}, {"api_name": 
"pandas.read_csv", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 112, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 126, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pandas.read_table", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 179, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 181, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 182, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 184, "usage_type": "call"}]} +{"seq_id": "30462181223", "text": "# in charge of parsing and returning data from the query layers spreadsheet\n# which confusingly has more than just query layers data in it. :)\n\nimport logging\nfrom os import path\nfrom time import sleep, time\n\nimport pygsheets\nimport settings\nfrom settings import fieldnames\n\nlogger = logging.getLogger('forklift')\nqlFields = [\n #: [, ]\n ['Name', fieldnames.name],\n ['Layer Description', fieldnames.description],\n ['Metadata Link', fieldnames.metaDataUrl],\n ['OID Field', fieldnames.oidField],\n [fieldnames.ID, fieldnames.ID],\n ['Division Heading', fieldnames.heading],\n [fieldnames.NAME, fieldnames.NAME],\n [fieldnames.ADDRESS, fieldnames.ADDRESS],\n [fieldnames.CITY, fieldnames.CITY],\n [fieldnames.TYPE, fieldnames.TYPE],\n ['Source Data', fieldnames.sourceData],\n ['SGID Feature Class Name', fieldnames.sgidName],\n ['Geometry Type', fieldnames.geometryType],\n ['ETL Type', fieldnames.etlType],\n ['Identify Attributes', fieldnames.fields],\n ['Document Search', fieldnames.docLink],\n ['GRAMA Request', fieldnames.gramaLink],\n ['Permit Information', fieldnames.permitLink],\n ['Additional Information', fieldnames.additionalLink],\n ['Map Label Field', fieldnames.ENVIROAPPLABEL],\n ['Secure', fieldnames.secure],\n ['Special Filters', fieldnames.specialFilters],\n ['Special Filter Default To On', fieldnames.specialFiltersDefaultOn],\n ['Additional Searches', fieldnames.additionalSearches],\n ['Custom Symbology Field', fieldnames.ENVIROAPPSYMBOL],\n ['Sort Field', fieldnames.sortField],\n ['Related Tables', fieldnames.relatedTables],\n ['Legend Title', fieldnames.legendTitle],\n ['Coded Values', fieldnames.codedValues]\n]\n\ntblFields = [\n ['Tab Name', fieldnames.name],\n ['Source Data', fieldnames.sourceData],\n ['SGID Table Name', fieldnames.sgidName],\n ['Fields', fieldnames.fields],\n ['Additional Information', fieldnames.additionalLink],\n ['Additional Information Link Fields', fieldnames.additionalLinkFields],\n ['OID Field', fieldnames.oidField]\n]\n\nlinksFields = [\n [fieldnames.ID, fieldnames.ID],\n ['Description', fieldnames.description],\n ['URL', fieldnames.url]\n]\n\ncredentials = 
path.join(path.dirname(__file__), 'settings', 'ut-dts-agrc-deq-enviro-prod-aecfbfdedcc3.json')\ngc = None\nsheet = None\nauthorize_time = None\n\n\ndef _login():\n global gc, sheet, authorize_time\n tries = 1\n max_tries = 10\n authorize_shelf_life = 600 #: 10 minutes\n\n while tries <= max_tries:\n try:\n if gc is None or authorize_time is None or time() - authorize_time > authorize_shelf_life:\n logger.debug('logging into google spreadsheet')\n authorize_time = time()\n gc = pygsheets.authorize(service_file=credentials)\n sheet = gc.open_by_url(settings.queryLayersUrl)\n\n return sheet\n except Exception as ex:\n if tries == max_tries:\n raise ex\n\n logger.warn('login error, retrying...')\n sleep(30)\n\n tries = tries + 1\n\n\ndef get_query_layers():\n return _get_worksheet_data(_login().worksheet('title', 'Query Layers'), qlFields)\n\n\ndef get_related_tables():\n return _get_worksheet_data(_login().worksheet('title', 'Related Tables'), tblFields)\n\n\ndef get_links():\n return _get_worksheet_data(_login().worksheet('title', 'Other Links'), linksFields)\n\n\ndef get_relationship_classes():\n return _login().worksheet('title', 'Relationship Classes').get_all_records()\n\n\ndef _get_worksheet_data(wksh, fields):\n data = []\n fieldIndices = {}\n firstRow = True\n for row in wksh.get_all_values():\n if firstRow:\n # get field indices\n i = 0\n for cell in row:\n fieldIndices[cell] = i\n i = i + 1\n\n firstRow = False\n continue\n\n o = {}\n for f in fields:\n o[f[1]] = row[fieldIndices[f[0]]].strip()\n data.append(o)\n\n return data\n", "repo_name": "agrc/deq-enviro", "sub_path": "scripts/nightly/spreadsheet.py", "file_name": "spreadsheet.py", "file_ext": "py", "file_size_in_byte": 4066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "94", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "settings.fieldnames.name", "line_number": 15, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 15, "usage_type": "name"}, {"api_name": "settings.fieldnames.description", "line_number": 16, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 16, "usage_type": "name"}, {"api_name": "settings.fieldnames.metaDataUrl", "line_number": 17, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 17, "usage_type": "name"}, {"api_name": "settings.fieldnames.oidField", "line_number": 18, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 18, "usage_type": "name"}, {"api_name": "settings.fieldnames.ID", "line_number": 19, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 19, "usage_type": "name"}, {"api_name": "settings.fieldnames.heading", "line_number": 20, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 20, "usage_type": "name"}, {"api_name": "settings.fieldnames.NAME", "line_number": 21, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 21, "usage_type": "name"}, {"api_name": "settings.fieldnames.ADDRESS", "line_number": 22, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 22, "usage_type": "name"}, {"api_name": "settings.fieldnames.CITY", "line_number": 23, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 23, "usage_type": "name"}, {"api_name": "settings.fieldnames.TYPE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", 
"line_number": 24, "usage_type": "name"}, {"api_name": "settings.fieldnames.sourceData", "line_number": 25, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 25, "usage_type": "name"}, {"api_name": "settings.fieldnames.sgidName", "line_number": 26, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 26, "usage_type": "name"}, {"api_name": "settings.fieldnames.geometryType", "line_number": 27, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 27, "usage_type": "name"}, {"api_name": "settings.fieldnames.etlType", "line_number": 28, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 28, "usage_type": "name"}, {"api_name": "settings.fieldnames.fields", "line_number": 29, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 29, "usage_type": "name"}, {"api_name": "settings.fieldnames.docLink", "line_number": 30, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 30, "usage_type": "name"}, {"api_name": "settings.fieldnames.gramaLink", "line_number": 31, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 31, "usage_type": "name"}, {"api_name": "settings.fieldnames.permitLink", "line_number": 32, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 32, "usage_type": "name"}, {"api_name": "settings.fieldnames.additionalLink", "line_number": 33, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 33, "usage_type": "name"}, {"api_name": "settings.fieldnames.ENVIROAPPLABEL", "line_number": 34, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 34, "usage_type": "name"}, {"api_name": "settings.fieldnames.secure", "line_number": 35, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 35, "usage_type": "name"}, {"api_name": "settings.fieldnames.specialFilters", "line_number": 36, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 36, "usage_type": "name"}, {"api_name": "settings.fieldnames.specialFiltersDefaultOn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 37, "usage_type": "name"}, {"api_name": "settings.fieldnames.additionalSearches", "line_number": 38, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 38, "usage_type": "name"}, {"api_name": "settings.fieldnames.ENVIROAPPSYMBOL", "line_number": 39, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 39, "usage_type": "name"}, {"api_name": "settings.fieldnames.sortField", "line_number": 40, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 40, "usage_type": "name"}, {"api_name": "settings.fieldnames.relatedTables", "line_number": 41, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 41, "usage_type": "name"}, {"api_name": "settings.fieldnames.legendTitle", "line_number": 42, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 42, "usage_type": "name"}, {"api_name": "settings.fieldnames.codedValues", "line_number": 43, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 43, "usage_type": "name"}, {"api_name": "settings.fieldnames.name", "line_number": 47, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 47, "usage_type": "name"}, {"api_name": "settings.fieldnames.sourceData", 
"line_number": 48, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 48, "usage_type": "name"}, {"api_name": "settings.fieldnames.sgidName", "line_number": 49, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 49, "usage_type": "name"}, {"api_name": "settings.fieldnames.fields", "line_number": 50, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 50, "usage_type": "name"}, {"api_name": "settings.fieldnames.additionalLink", "line_number": 51, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 51, "usage_type": "name"}, {"api_name": "settings.fieldnames.additionalLinkFields", "line_number": 52, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 52, "usage_type": "name"}, {"api_name": "settings.fieldnames.oidField", "line_number": 53, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 53, "usage_type": "name"}, {"api_name": "settings.fieldnames.ID", "line_number": 57, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 57, "usage_type": "name"}, {"api_name": "settings.fieldnames.description", "line_number": 58, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 58, "usage_type": "name"}, {"api_name": "settings.fieldnames.url", "line_number": 59, "usage_type": "attribute"}, {"api_name": "settings.fieldnames", "line_number": 59, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 62, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "pygsheets.authorize", "line_number": 79, "usage_type": "call"}, {"api_name": "settings.queryLayersUrl", "line_number": 80, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "1728093414", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport xlrd\n\n\ndef get_province_city():\n xls_path = './tools/city.xls'\n print(xls_path)\n book = xlrd.open_workbook(xls_path)\n sheet = book.sheets()[0]\n name_ = list()\n for r in range(1, sheet.nrows):\n # code = sheet.cell(r, 0).value\n name = sheet.cell(r, 1).value\n temp = get_keyword(name)\n if temp:\n\n name_.append(temp[0])\n name_.append(temp[1])\n\n\n return name_\n\ndef get_keyword(word):\n if '省' in word:\n temp = word.replace('省','')\n return word,temp\n elif '市' in word:\n temp = word.replace('市','')\n return word,temp\n else:\n return None\n\n\ndef get_keyword_two(word):\n if '自治区' in word:\n word = word.replace('自治区', '')\n return word\n if '自治县' in word:\n word = word.replace('自治县', '')\n return word\n if '县' in word:\n if len(word) > 2:\n word = word.replace('县', '')\n return word\n else:\n return word\n if '区' in word:\n if len(word) > 2:\n word = word.replace('区', '')\n return word\n else:\n return word\n if '市' in word:\n if len(word) > 2:\n word = word.replace('市', '')\n return word\n else:\n return word\n if '省' in word:\n word = word.replace('省', '')\n return word\n if '旗' in word:\n word = word.replace('旗', '')\n return word\n else:\n return None\n\n", "repo_name": "timor1988/keyword-extract-from-company-name", "sub_path": "city.py", "file_name": "city.py", "file_ext": "py", "file_size_in_byte": 1568, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "94", "api": [{"api_name": "xlrd.open_workbook", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "12732813661", "text": "from django.urls.conf import path\nfrom .views import (\n betikaSubscriber, betpawaSubscriber, bongobongobetSubscriber, fixedMSubscriber, guruSubscriber, mozzartSubscriber, multi1X2, multiGG, multiHft, multiOvund, premiumSubscriber, \n sportpesaSubscriber, sportybetSubscriber, vipSubscriber)\n\napp_name = 'subs'\n\nurlpatterns = [\n path('premium/', premiumSubscriber),\n path('vip/', vipSubscriber),\n path('fixedm/', fixedMSubscriber),\n path('guru/', guruSubscriber),\n path('sportpesa/', sportpesaSubscriber),\n path('sportybet/', sportybetSubscriber),\n path('betika/', betikaSubscriber),\n path('betpawa/', betpawaSubscriber),\n path('bongobongo/', bongobongobetSubscriber),\n path('mozzart/', mozzartSubscriber),\n path('1x2/', multi1X2),\n path('gg/', multiGG),\n path('ovund/', multiOvund),\n path('htft/', multiHft),\n]", "repo_name": "Festorz/Tips-Predictions-Python-Django-Server", "sub_path": "subscriptions/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "django.urls.conf.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.premiumSubscriber", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.vipSubscriber", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.fixedMSubscriber", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.guruSubscriber", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.sportpesaSubscriber", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.sportybetSubscriber", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.betikaSubscriber", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.betpawaSubscriber", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.bongobongobetSubscriber", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 18, "usage_type": "call"}, {"api_name": "views.mozzartSubscriber", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.multi1X2", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 20, "usage_type": "call"}, {"api_name": "views.multiGG", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 21, "usage_type": "call"}, {"api_name": "views.multiOvund", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.urls.conf.path", "line_number": 22, "usage_type": "call"}, {"api_name": "views.multiHft", 
"line_number": 22, "usage_type": "argument"}]} +{"seq_id": "42766488708", "text": "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GRUEncoder(nn.Module):\n def __init__(self, d_input, d_hidden, d_output, args):\n super(GRUEncoder, self).__init__()\n\n self.args = args\n\n self.d_input = d_input\n self.d_hidden = d_hidden\n self.d_output = d_output\n\n self.fc1 = nn.Linear(d_input, d_hidden)\n self.rnn = nn.GRUCell(d_hidden, d_hidden)\n self.fc2 = nn.Linear(d_hidden, d_output)\n\n def init_hidden(self, bs):\n # make hidden states on same device as model\n return self.fc1.weight.new(1, self.d_hidden).zero_().expand(bs, -1).contiguous()\n\n def forward(self, inputs, hidden_state, **kwargs):\n x = F.relu(self.fc1(inputs))\n h_in = hidden_state.reshape(-1, self.d_hidden)\n h = self.rnn(x, h_in)\n z = self.fc2(h)\n \n if self.args.normalize_z:\n z = F.normalize(z, p=2, dim=1)\n\n return z, h\n", "repo_name": "DeepAI-Comm/CoordinationSchemeProbing", "sub_path": "src/modules/encoders/gru_encoder.py", "file_name": "gru_encoder.py", "file_ext": "py", "file_size_in_byte": 942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.GRUCell", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "25785540825", "text": "from pathlib import PurePath\r\n\r\n\r\ndef bounds_mapping(max_rows):\r\n # Basically, as an array (for ease), the following items to map one edge to\r\n # the corresponding edge elsewhere, 5 edges are adjacent in the net,\r\n # leaving 7 to be mapped. 
If done carefully we can search it \"backwards\".\r\n #\r\n # x1, y1, x2, y2 - source line (x1==x2 or y1==y2, and x1<=x2 and y1<=y2)\r\n # source direction (since some internal corners are ambiguous)\r\n # x3, y3, x4, y4 - target line (x and y equal etc., but x4<=x3 possible)\r\n # target direction\r\n #\r\n # Directions: 0=R, 1=D, 2=L, 3=U\r\n if max_rows == 12:\r\n # Start with the top edge and work clockwise round.\r\n return [\r\n [-1, 8, -1, 11, 3, 4, 3, 4, 0, 1],\r\n [0, 12, 3, 12, 0, 11, 15, 11, 12, 2],\r\n [4, 12, 7, 12, 0, 8, 15, 8, 12, 1],\r\n [12, 12, 12, 15, 1, 0, 7, 0, 4, 0],\r\n [12, 8, 12, 11, 1, 7, 3, 7, 0, 3],\r\n [8, 8, 11, 8, 2, 7, 7, 7, 4, 3],\r\n [3, 4, 3, 7, 3, 0, 8, 3, 8, 0],\r\n ]\r\n else:\r\n # And the puzzle mapping is completely different, not even scaled...\r\n return [\r\n [-1, 50, -1, 99, 3, 150, 0, 199, 0, 0],\r\n [-1, 100, -1, 149, 3, 199, 0, 199, 49, 3],\r\n [0, 150, 49, 150, 0, 149, 99, 100, 99, 2],\r\n [50, 100, 50, 149, 1, 50, 99, 99, 99, 2],\r\n [150, 50, 150, 99, 1, 150, 49, 199, 49, 2],\r\n [100, -1, 149, -1, 2, 49, 50, 0, 50, 0],\r\n [99, 0, 99, 49, 3, 50, 50, 99, 50, 0],\r\n ]\r\n\r\n\r\ndef in_range(value, start, end):\r\n return start <= value <= end if start <= end else end <= value <= start\r\n\r\n\r\ndef opposite(direction):\r\n return (direction + 2) % 4\r\n\r\n\r\ndef get_mapped_position(position, direction, max_row):\r\n mapping = bounds_mapping(max_row)\r\n\r\n for m in mapping:\r\n if (\r\n m[0] <= position[0] <= m[2]\r\n and m[1] <= position[1] <= m[3]\r\n and direction == m[4]\r\n ):\r\n offset = position[0] - m[0] if m[1] == m[3] else position[1] - m[1]\r\n\r\n # Does the varying range run backwards?\r\n if m[7] < m[5] or m[8] < m[6]:\r\n offset = -offset\r\n\r\n if m[5] == m[7]:\r\n position[0] = m[5]\r\n position[1] = m[6] + offset\r\n else:\r\n position[0] = m[5] + offset\r\n position[1] = m[6]\r\n\r\n direction = m[9]\r\n return position, direction\r\n\r\n # More care with mapping the other way...\r\n # Work out the \"slightly outside\" location.\r\n if m[5] == m[7]:\r\n drow = 1 if m[9] == 1 else -1\r\n dcol = 0\r\n else:\r\n drow = 0\r\n dcol = 1 if m[9] == 0 else -1\r\n\r\n if (\r\n in_range(position[0] + drow, m[5], m[7] + drow)\r\n and in_range(position[1] + dcol, m[6], m[8] + dcol)\r\n and direction == opposite(m[9])\r\n ):\r\n # Ensure the offset is positive, despite the range\r\n offset = (\r\n abs(m[5] - position[0]) if m[6] == m[8] else abs(m[6] - position[1])\r\n )\r\n\r\n # The start positions are already \"off-board\"\r\n if m[0] == m[2]:\r\n position[0] = m[0] + (1 if m[4] == 3 else -1)\r\n position[1] = m[1] + offset\r\n else:\r\n position[0] = m[0] + offset\r\n position[1] = m[1] + (1 if m[4] == 2 else -1)\r\n\r\n direction = opposite(m[4])\r\n return position, direction\r\n\r\n # If we got here, it's not good...\r\n assert False\r\n\r\n\r\ndef follow_path(board, max_row, max_column, path, start_column, iscube):\r\n moves = [[0, 1], [1, 0], [0, -1], [-1, 0]]\r\n\r\n position = [0, start_column]\r\n direction = 0\r\n turning = False\r\n\r\n while path:\r\n item = path[0]\r\n\r\n if turning:\r\n direction += 1 if item == \"R\" else -1\r\n direction %= 4\r\n else:\r\n while item:\r\n new = [\r\n position[0] + moves[direction][0],\r\n position[1] + moves[direction][1],\r\n ]\r\n next_direction = direction\r\n\r\n if iscube:\r\n off_board = False\r\n\r\n if (\r\n 0 <= new[0] < max_row\r\n and 0 <= new[1] < max_column\r\n and board[new[0]][new[1]] == 0\r\n ):\r\n off_board = True\r\n\r\n if not 0 <= new[0] < max_row or 
not 0 <= new[1] < max_column:\r\n off_board = True\r\n\r\n if off_board:\r\n new, next_direction = get_mapped_position(\r\n new, direction, max_row\r\n )\r\n else:\r\n new[0] %= max_row\r\n new[1] %= max_column\r\n\r\n while board[new[0]][new[1]] == 0:\r\n new[0] = (new[0] + moves[direction][0]) % max_row\r\n new[1] = (new[1] + moves[direction][1]) % max_column\r\n\r\n if board[new[0]][new[1]] == 2:\r\n break\r\n else:\r\n item -= 1\r\n\r\n position = new\r\n direction = next_direction\r\n\r\n path = path[1:]\r\n turning = not turning\r\n\r\n return 1000 * (position[0] + 1) + 4 * (position[1] + 1) + direction\r\n\r\n\r\ndef print_board(board, position1=None, position2=None):\r\n print(f\"Pos 1: {position1} Pos 2: {position2}\")\r\n\r\n for row in range(len(board)):\r\n line = \"\"\r\n\r\n for column in range(len(board[row])):\r\n value = board[row][column]\r\n\r\n if position1 and position1[0] == row and position1[1] == column:\r\n line += \"1\"\r\n elif position2 and position2[0] == row and position2[1] == column:\r\n line += \"2\"\r\n elif value == 1:\r\n line += \".\"\r\n elif value == 2:\r\n line += \"#\"\r\n else:\r\n line += \" \"\r\n\r\n print(f\"{row:>2}: {line}\")\r\n\r\n\r\ndef read_path(path: str):\r\n instructions = []\r\n\r\n length = 0\r\n\r\n while path:\r\n char = path[0]\r\n\r\n if char.isnumeric():\r\n length *= 10\r\n length += int(char)\r\n elif char in (\"RL\"):\r\n if length != 0:\r\n instructions.append(length)\r\n length = 0\r\n instructions.append(char)\r\n\r\n path = path[1:]\r\n\r\n if length != 0:\r\n instructions.append(length)\r\n\r\n return instructions\r\n\r\n\r\ndef read_board(lines, board, max_column):\r\n max_row = 0\r\n empty_row = [0] * max_column\r\n start_column = None\r\n\r\n for line in lines:\r\n line = line.rstrip(\"\\n\")\r\n\r\n if not line:\r\n continue\r\n\r\n if line[0] not in (\" .#\"):\r\n path = read_path(line)\r\n break\r\n\r\n board.append(empty_row.copy())\r\n\r\n for column in range(len(line)):\r\n char = line[column]\r\n\r\n if char == \".\":\r\n if start_column is None and max_row == 0:\r\n start_column = column\r\n\r\n board[max_row][column] = 1\r\n elif char == \"#\":\r\n board[max_row][column] = 2\r\n\r\n max_row += 1\r\n\r\n return path, max_row, start_column\r\n\r\n\r\ndef main(day: int, input_path: str, input_type: str):\r\n with open(f\"{input_path}/{input_type}/Day{day:02}.txt\", \"r\") as f:\r\n lines = f.readlines()\r\n\r\n board = []\r\n max_column = max([len(line) for line in lines])\r\n\r\n path, max_row, start_column = read_board(lines, board, max_column)\r\n\r\n result = follow_path(board, max_row, max_column, path, start_column, False)\r\n print(f\"{input_type:>6} Part 1: {result}\")\r\n\r\n result = follow_path(board, max_row, max_column, path, start_column, True)\r\n print(f\"{input_type:>6} Part 2: {result}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n here = PurePath(__file__)\r\n day = int(here.name[3:5])\r\n input_path = f\"../../AOCdata/{here.parent.name}\"\r\n\r\n main(day, input_path, \"Test\")\r\n main(day, input_path, \"Puzzle\")\r\n", "repo_name": "RichardBrookfield/AOC", "sub_path": "2022/Day22.py", "file_name": "Day22.py", "file_ext": "py", "file_size_in_byte": 8230, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pathlib.PurePath", "line_number": 264, "usage_type": "call"}]} +{"seq_id": "32328721041", "text": "import requests\n\ntry:\n base = input(\"I can show you the latest currencies.\"\n \"\\n Please enter what type 
of currency you would like to know (for example, from USD to ILS). From: \")\n    rate = input(\"Convert to: \")\n    url = f\"https://api.apilayer.com/fixer/latest?symbols={rate}&base={base}\"\n\n    payload = {}\n    headers = {\n        \"apikey\": \"SNtRFTb0ugdyfFcnQMY1t5qnzOAPMQBw\"\n    }\n    response = requests.request(\"GET\", url, headers=headers, data=payload)\n    # response: This variable will store the response object returned by the API endpoint.\n    # requests.request(): This function is used to send an HTTP request to a server. It takes several arguments to configure the request, including the HTTP method, the URL, request headers, and data to be sent with the request.\n    # \"GET\": This string specifies the HTTP method used for the request. In this case, it is the GET method, which is used to retrieve data from the server.\n    # url: This variable contains the URL of the API endpoint that we want to send the request to.\n    # headers=headers: This specifies the headers to be included in the request. In this case, it is the headers dictionary containing the \"apikey\" header with the API key.\n    # data=payload: This specifies the data to be sent with the request. In this case, it is an empty dictionary because we are not sending any data with the request.\n    # So, overall, this line of code is sending an HTTP GET request to the specified API endpoint with the \"apikey\" header included in the request header, and no data included in the request body. The response object returned by the API endpoint will be stored in the response variable for further processing.\n    data = response.json()\n    # Look up the currency the user asked to convert to, rather than a hard-coded \"ILS\".\n    target_rate = data[\"rates\"][rate]\n\n    print(f\"The exchange rate for 1 {base} to {rate} is: {target_rate}\")\n\nexcept Exception:\n    print(\"Could not get the rate from the API. Check the currency codes and try again.\")\n", "repo_name": "NatanelAmlinsky/Currency_converter_project", "sub_path": "apiCurrencies.py", "file_name": "apiCurrencies.py", "file_ext": "py", "file_size_in_byte": 1903, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "requests.request", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "6180775230", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nThis module provides a simple multi-threaded writer utility for VSGProjects and VSGSolutions\n\"\"\"\nimport os\nimport sys\nimport time\nimport threading\nimport itertools\nimport jinja2\nimport errno\n\n\nclass VSGJinjaRenderer(object):\n    \"\"\"\n    A class defining methods interacting with `Jinja2 `_.\n    \"\"\"\n\n    def render(self, template, filename, context={}, filters={}):\n        \"\"\"\n        Renders a Jinja2 template to text.\n        \"\"\"\n        filename = os.path.normpath(filename)\n        path, file = os.path.split(filename)\n        try:\n            os.makedirs(path)\n        except OSError as exception:\n            if exception.errno != errno.EEXIST:\n                raise\n\n        path, file = os.path.split(template)\n        loader = jinja2.FileSystemLoader(path)\n        env = jinja2.Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)\n        env.filters.update(filters)\n        template = env.get_template(file)\n        text = template.render(context)\n        with open(filename, 'wt') as f:\n            f.write(text)\n\n\nclass VSGWritable(object):\n    \"\"\"\n    An interface class defining methods necessary for VSGWriter\n    \"\"\"\n    __writable_name__ = \"Unknown Writable\"\n\n    def write(self):\n        \"\"\"\n        Interface method to 'write' the object.\n        \"\"\"\n        raise NotImplementedError(\"Should have implemented this\")\n\n    def text(self, value):\n        \"\"\"\n        Converts a value to text in a way compatible with Python2 and Python 3.\n\n        :param 
object value: The value to convert.\n        :return: The value as text.\n        \"\"\"\n        return unicode(value) if sys.version_info < (3,) else str(value)\n\n    def upper(self, value):\n        \"\"\"\n        Converts a value to upper case text in a way compatible with Python2 and Python 3.\n\n        :param object value: The value to convert.\n        :return: The value as upper case text.\n        \"\"\"\n        return self.text(value).upper()\n\n    def lower(self, value):\n        \"\"\"\n        Converts a value to lower case in a way compatible with Python2 and Python 3.\n\n        :param object value: The value to convert.\n        :return: The value as lower case text.\n        \"\"\"\n        return self.text(value).lower()\n\n\nclass VSGWriteCommand(object):\n    \"\"\"\n    The VSGWriteCommand class presents a simple command object to execute the writing methods of a collection of VSGWritable objects.\n    \"\"\"\n\n    def __init__(self, logname, writables, parallel=True):\n        \"\"\"\n        Initializes the instance with default values.\n\n        :param str logname: The Python logger log name.\n        :param list writables: The list of VSGWritable class instances.\n        :param bool parallel: Flag to enable asynchronous writing.\n        \"\"\"\n        self._logname = logname\n        self._writables = writables\n        self._parallel = parallel\n        writables_names = set([w.__writable_name__ for w in writables])\n        if not writables_names:\n            self._message = \"Writing no files.\"\n        elif len(writables_names) == 1:\n            self._message = \"Writing {0}{1}\".format(next(iter(writables_names)), 's' if len(writables) > 1 else '')\n        else:\n            self._message = \"Writing a mixed collection of files.\"\n\n    def __enter__(self):\n        \"\"\"\n        Enter the runtime context related to this object.\n        \"\"\"\n        return self\n\n    def __exit__(self, exc_type, exc_value, exc_traceback):\n        \"\"\"\n        Exit the runtime context related to this object.\n        \"\"\"\n        # Only return True to suppress the exception (if any)\n        return False\n\n    def execute(self):\n        \"\"\"\n        Executes the command.\n        \"\"\"\n        from vsgen.util.logger import VSGLogger\n\n        VSGLogger.info(self._logname, self._message)\n        start = time.clock()\n        VSGWriter.write(self._writables, self._parallel)\n        end = time.clock()\n        VSGLogger.info(self._logname, \"Wrote %s files in %s seconds:\", len(self._writables), end - start)\n\n\nclass VSGWriter(threading.Thread):\n    \"\"\"\n    VSGWriter encapsulates the logic needed to write any VSG object to disk.\n    \"\"\"\n\n    def __init__(self, pylist):\n        \"\"\"\n        Initializes the writer with a single VSG object or a collection of them.\n\n        :param list pylist: A list of VSG objects (PrProjects, VSGSolutions, etc)\n        \"\"\"\n        threading.Thread.__init__(self)\n        if not hasattr(pylist, '__iter__'):\n            self._pylist = [pylist]\n        else:\n            self._pylist = pylist\n\n    def run(self):\n        \"\"\"\n        The Thread's execution function.\n        \"\"\"\n        for pyitem in self._pylist:\n            pyitem.write()\n\n    @staticmethod\n    def write(pylist, parallel=True):\n        \"\"\"\n        Utility method to spawn a VSGWriter for each element in a collection.\n\n        :param list pylist: A list of VSG objects (PrProjects, VSGSolutions, etc)\n        :param bool parallel: Flag to enable asynchronous writing.\n        \"\"\"\n        threads = [VSGWriter(o) for o in pylist]\n        if parallel:\n            for t in threads:\n                t.start()\n            for t in threads:\n                t.join()\n        else:\n            for t in threads:\n                t.run()\n", "repo_name": "dbarsam/python-vsgen", "sub_path": "vsgen/writer.py", "file_name": "writer.py", "file_ext": "py", "file_size_in_byte": 5251, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.path.normpath", 
"line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 32, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 60, "usage_type": "attribute"}, {"api_name": "vsgen.util.logger.VSGLogger.info", "line_number": 124, "usage_type": "call"}, {"api_name": "vsgen.util.logger.VSGLogger", "line_number": 124, "usage_type": "name"}, {"api_name": "time.clock", "line_number": 125, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 127, "usage_type": "call"}, {"api_name": "vsgen.util.logger.VSGLogger.info", "line_number": 128, "usage_type": "call"}, {"api_name": "vsgen.util.logger.VSGLogger", "line_number": 128, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 131, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 142, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 142, "usage_type": "attribute"}]} +{"seq_id": "13831848086", "text": "import tkinter as tk \nfrom tkinter import *\nfrom ca_func import *\nfrom PIL import ImageTk, Image, ImageDraw\n#import ttk\nimport types\nfrom numpy import base_repr\n\nmainWindow = Tk()\nmainWindow.grid()\nmainWindow.grid_propagate(1)\n\nclass MatrixButton(tk.Button):\n INSTANCE_NUM = 0\n def __init__(self, *args, **kwargs):\n# print (\"len(*args)\")\n# print (len(args))\n# print (args[1][0])\n# print (MatrixButton.INSTANCE_NUM)\n tk.Button.__init__(self, *args[0:1], **kwargs)\n self.value = 0 \n self.ID = self.INSTANCE_NUM\n MatrixButton.INSTANCE_NUM += 1\n# self.imgON = ImageTk.PhotoImage(args[1])\n# print (\"self.imgON\")\n# print (args[1][1])\n# args[1][1].save(r\"C:/testPix3b.png\")\n# args[1][0].save(r\"C:/testPix3a.png\")\n self.imgs = [args[1][0], args[1][1]]\n# print (\"self.imgs-assigned\")\n \n def switchValue(self):\n if self.value == 0:\n self.setValue(1)\n elif self.value == 1:\n self.setValue(0)\n self.configure(bg = \"red\")\n \n def selectionOff(self):\n self.configure(bg = \"Gray80\")\n self.setImage()\n\n def setImage(self, *args):\n if len(args) !=0:\n self.imgs = args[0]\n #print (\"MatrixButton->setImage()->self.imgs was set to {0} and {1}\".format(args[0][0], args[0][1]))\n self.config(image = self.imgs[self.value])\n\n def reset(self):\n self.configure()\n\n def setValue(self, inValue):\n self.value = inValue\n self.setImage()\n def getValue(self):\n return self.value\n def getID(self):\n return self.ID\n\nclass InitialMatrixButton(MatrixButton):\n INSTANCE_NUM = 0\n def __init__(self, *args, **kwargs):\n MatrixButton.__init__(self, *args, **kwargs)\n self.ID = self.INSTANCE_NUM\n InitialMatrixButton.INSTANCE_NUM += 1\n\nclass CaAPP(tk.Frame):\n HEX_RULE = \"\" \n BIN_RULE = \"\"\n INT_RULE = \"\"\n ALPHA_NUM_RULE = \"\"\n def __init__(self, master):\n tk.Frame.__init__(self, master, bg = \"yellow\", bd = 1)\n self.grid(sticky=tk.N+tk.S+tk.E+tk.W)\n self.createWidgets()\n self.setGlobals()\n self.caResX = 51\n self.caResY = 51\n 
self.setPaths()\n self.rbVar = tk.IntVar()\n self.rbVar.set(0)\n self.imRbVar = tk.IntVar()\n self.imRbVar.set(0)\n self.ruleStrVar = tk.StringVar()\n self.myLogData = readCSV_Log(self.logReadPath)\n self.getMatrixImgs()\n self.width = 1920\n self.height = 1000\n self.bgColor = \"Gray16\"\n self.text1Color = \"white\"\n self.text2Color = \"Gray60\"\n # self.config(height = 800)\n master.geometry(\"\" + str(self.width) + \"x\" + str(self.height))\n master.resizable(1, 1)\n self.pack(fill=BOTH, expand=1)\n self.matrixButtonsToChange = [] \n self.currentRow = 0\n self.grid(sticky=N+S+E+W)\n self.intialMatrixWidth = 25\n \n self.cols = 89 - self.intialMatrixWidth\n# self.createWidgets()\n #self.createMenu()\n self.createInspectorMatrixFrame() \n self.caLayCount = 30\n self.caCols = 20\n self.caCanvasXscale = 1.0\n self.caCanvasYscale = 1.0\n self.caCanvasScaleInc = 0.1\n self.create_CA_Canvas()\n self.initButtonSize = 13\n self.createInitialImageCanvas()\n \n self.initialMatrixToChange = [] \n self.initialImageMatrix = self.createInitialImageMatrix()\n self.createMenuBar()\n \n self.updateCanvas()\n \n def createWidgets(self):\n top = self.winfo_toplevel()\n top.rowconfigure(0, weight=1)\n top.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n\n def getMatrixImgs(self):\n myLogData = readCSV_Log(self.logReadPath)\n hexRuleON = \"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n binRuleON = getBinRuleFromHex(hexRuleON)\n PILimagesON = getCrossImgs(binRuleON, getInspArraySeqence(9), 2, myLogData)\n hexRuleOFF = \"0\"\n binRuleOFF = getBinRuleFromHex(hexRuleOFF)\n PILimagesOFF = getCrossImgs(binRuleOFF, getInspArraySeqence(9), 2, myLogData)\n print (\"PIL images created\")\n self.matrixButtonImgs = []\n for i, img in enumerate(PILimagesON):\n# imageON = ImageTk.PhotoImage(img)\n# imageOFF = ImageTk.PhotoImage(PILimagesOFF[i])\n self.matrixButtonImgs.append([PILimagesOFF[i], img])\n # print (self.matrixButtonImgs)\n # print (\"matrixButtonImgs created\")\n\n def updateMatrixButtons(self):\n for i, w in enumerate(self.matrixButtons):\n w.setValue(int(self.BIN_RULE[i]))\n w.setImage(self.matrixButtonImgs[i])\n \n\n def updateInspMatrixImgs(self): \n #print (\"myLogData from path: {0} was loaded\".format(myLogDataPath))\n self.myNewInspMatrixImgs = self.matrixButtonImgs\n myCaimage = self.caIMGs[5]\n rImage = self.caPILimgs[5].resize((200,200))\n self.resizedImage = ImageTk.PhotoImage(rImage)\n for i, img in enumerate(self.myNewInspMatrixImgs):\n self.myImg = img[0]\n self.caCanvas.create_image(i*50, 0 ,image = self.myImg, anchor = tk.NW)\n #print (myCaimage)\n self.testButton = tk.Button(self.caCanvas, image = myCaimage)\n self.caCanvas.create_image(100, 0 ,image = self.resizedImage, anchor = tk.NW)\n self.testButton.grid()\n #self.buttonFrame = tk.Frame(self.caCanvas, width = 100, height = 20, bd = 0)\n #self.buttonFrame.grid(sticky = N+S+E+W)\n #self.caCanvas.create_image(0, 0 , image = myCaimage, anchor = tk.NW)\n #self.caCanvas.create_window(0,0, height = 100, width = 100)\n\n def getLogData():\n return self.myLogData\n\n def setGlobals(self):\n CaAPP.HEX_RULE = \"24a0e880a0208000020480004000000084a08408a080000000482800800000010018800000000000290808002000000028000208400200011044000000000012\" \n intRule = int(CaAPP.HEX_RULE,16)\n self.ruleStrVar = CaAPP.HEX_RULE\n CaAPP.ALPHA_NUM_RULE = base_repr(intRule, base=36)\n # print (\"setGlobals - CaAPP.HEX_RULE\")\n # 
print (CaAPP.HEX_RULE)\r\n        CaAPP.BIN_RULE = getBinRuleFromHex(CaAPP.HEX_RULE)\r\n        \r\n        # print (\"setGlobals - CaAPP.BIN_RULE\")\r\n        # print (CaAPP.BIN_RULE)\r\n        # print (type(CaAPP.BIN_RULE))\r\n        CaAPP.INT_RULE = intRule\r\n        # print (\"setGlobals - CaAPP.INT_RULE\")\r\n        # print (CaAPP.INT_RULE)\r\n        \r\n    def updateBINrule(self, inBinRule):\r\n        CaAPP.BIN_RULE = inBinRule\r\n        # print (\"updateBINrule() - inBinRule\" + str(int(inBinRule,2)))\r\n        # print (\"updateBINrule() - BIN_RULE\" + str(int(CaAPP.BIN_RULE,2)))\r\n        CaAPP.INT_RULE = int(CaAPP.BIN_RULE,2)\r\n        CaAPP.ALPHA_NUM_RULE = base_repr(CaAPP.INT_RULE, base=36)\r\n        CaAPP.HEX_RULE = \"{0:x}\".format(CaAPP.INT_RULE)\r\n        \r\n    def updateHEXrule(self, inHexRule):\r\n        CaAPP.HEX_RULE = inHexRule \r\n        CaAPP.BIN_RULE = getBinRuleFromHex(CaAPP.HEX_RULE) \r\n        CaAPP.INT_RULE = int(CaAPP.BIN_RULE,2)\r\n        CaAPP.ALPHA_NUM_RULE = base_repr(CaAPP.INT_RULE, base=36)\r\n        self.ruleStrVar = CaAPP.ALPHA_NUM_RULE\r\n        \r\n    def updateANUMrule(self, inNumRule):\r\n        CaAPP.ALPHA_NUM_RULE = inNumRule \r\n        self.ruleStrVar = inNumRule\r\n        CaAPP.INT_RULE = int(inNumRule,36) \r\n        CaAPP.HEX_RULE = \"{:x}\".format(CaAPP.INT_RULE)\r\n        CaAPP.BIN_RULE = getBinRuleFromHex(CaAPP.HEX_RULE)\r\n        \r\n    def setPaths(self):\r\n        # note: the drive letter was missing its path separator (\"C:DANO\")\r\n        self.CSV_logPath = r\"C:/DANO/_WORK/DATA/PYTHON/CELULAR_AUTOMAT-2D/LOG/\"\r\n        self.saveCAimagesPath = r\"C:/DANO/_WORK/DATA/PYTHON/CELULAR_AUTOMAT-2D/RNDCA2D9-sequence/interesting/\"\r\n        #self.logReadPath = self.CSV_logPath + \"CA2D9_\" + self.HEX_RULE + \"_\" + str(self.caResX) + \"x\" + str(self.caResY) + \".csv\"\r\n        self.logReadPath = self.getLOGpath(CaAPP.HEX_RULE)\r\n\r\n    def getLOGpath(self,inFileName): \r\n        returnPath = self.CSV_logPath + \"CA2D9_\" + inFileName + \"_\" + str(self.caResX) + \"x\" + str(self.caResY) + \".csv\"\r\n        return returnPath\r\n\r\n    def generateCAimgs(self):\r\n        self.caPILimgs = runWithLayer(self.createInitialImage(), self.caResX, self.caResY, self.caLayCount, CaAPP.BIN_RULE, self.logReadPath, True)\r\n        self.caIMGs = []\r\n        for img in self.caPILimgs:\r\n            self.caIMGs.append(ImageTk.PhotoImage(img))\r\n\r\n    # def createWidgets(self):\r\n    #     self.quitButton = tk.Button(self, text = \"Quit\", command = self.quit)\r\n    #     self.quitButton.grid(columnspan = self.cols)\r\n    \r\n    # def setBINRuleByHEX(self, inHex):\r\n    #     # print (\"setBINRuleByHEX - inHex\")\r\n    #     # print (inHex)\r\n    #     self.BIN_RULE = getBinRuleFromHex(inHex)\r\n    #     pass\r\n    \r\n    def addMatrixSelection(self, udalost):\r\n        #self.BIN_RULE[inPosition] = self.switchValue(self.BIN_RULE[inPosition])\r\n        udalost.widget.switchValue()\r\n        if udalost.widget.ID in self.matrixButtonsToChange:\r\n            self.matrixButtonsToChange.remove(udalost.widget.ID)\r\n            udalost.widget.selectionOff()\r\n        \r\n        else:\r\n            self.matrixButtonsToChange.append(udalost.widget.ID)\r\n            # print (\"Added to matrixButtonsToChange \" + str(udalost.widget.ID))\r\n            # print (self.matrixButtonsToChange)\r\n        # print (\"hodnota value\")\r\n        # print (udalost.widget.value)\r\n        # print (\"MatrixButton ID\")\r\n        # print (udalost.widget.ID)\r\n\r\n    def addInitialMatrixSelection(self, udalost):\r\n        myButton = udalost.widget\r\n        myConf = myButton.config()\r\n        stateValues = myConf['state']\r\n        stateValue = stateValues[len(stateValues)-1]\r\n        print ('stateValue of button {0} = {1}'.format(myButton.ID, stateValue))\r\n#        print (stateValue)\r\n        if stateValue != \"disabled\":\r\n            udalost.widget.switchValue()\r\n            self.updateInitialImageMatrix(udalost.widget.getID(), udalost.widget.getValue())\r\n            print (\"OK, you can change this value\")\r\n\r\n        else:\r\n            print (\"no, you cannot change this value\")\r\n        pass\r\n        # self.initialMatrixButtons\r\n\r\n    def createInitialImageMatrix(self):\r\n        returnArray = []\r\n        for i in self.initialMatrixButtons:\r\n            
returnArray.append(i.getValue())\n return returnArray\n\n def updateInitialImageMatrix(self, inIndex, inValue):\n self.initialImageMatrix[inIndex] = inValue\n print (\"initialImageMatrix\")\n print (self.initialImageMatrix)\n \n def createInitialImage(self):\n img = Image.new(\"RGBA\",(self.caResX,self.caResY), (0,0,0,255))\n if self.rbVar.get() == 0:\n img = createFirstLayerFromScratch(self.caResX, self.caResY)\n else:\n returnPixCoords = []\n for i, v in enumerate(self.initialImageMatrix):\n if v == 1:\n returnPixCoords.append(get2DpixCoords(i, self.intialMatrixWidth))\n imgI = Image.new(\"RGBA\",(self.intialMatrixWidth,self.intialMatrixWidth), (0,0,0,255))\n imgID = ImageDraw.Draw(imgI)\n imgID.point(returnPixCoords, (255, 255, 255,255))\n #img.paste(imgI, (int((abs((self.intialMatrixWidth-self.caResX)/2), int((abs(self.intialMatrixWidth-self.caResY)/2)))))\n img.paste(imgI, (int(self.caResX/2 - self.intialMatrixWidth/2), int(self.caResY/2 - self.intialMatrixWidth/2)))\n #img.save(r\"H:/_WORK/PYTHON/CELULAR_AUTOMAT-2D/pasteTest.png\")\n print (\"pasteTest.png saved\")\n print ('self.rbVar - {}'.format(self.rbVar.get()))\n return img\n\n def updateRule(self, udalost):\n# updatedRule = self.switchRuleValues(self.BIN_RULE)\n # print (\"self.INT_RULE\")\n # print (self.INT_RULE)\n print (\"len(self.matrixButtonsToChange)\")\n print (len(self.matrixButtonsToChange))\n print (\"udalost.widget.cget(text)\")\n print (udalost.widget.cget('text'))\n self.caLayCount = int(self.caLayCountSB.get())\n if udalost.widget.cget('text') == 'Change Rule':\n self.updateANUMrule(self.ruleEntry.get())\n else:\n if len(self.matrixButtonsToChange) != 0:\n print (\"len(self.matrixButtonsToChange)\")\n print (len(self.matrixButtonsToChange))\n updatedRule = self.getNewRule()\n self.updateBINrule(updatedRule)\n else:\n updatedRule = self.ruleEntry.get()\n self.updateANUMrule(updatedRule)\n\n self.setPaths()\n print (\"Path to LOG file was updated to : {0}\".format(self.logReadPath))\n self.generateCAimgs()\n self.getMatrixImgs()\n #self.updateInspMatrixImgs() \n self.updateMatrixButtons()\n if len(self.matrixButtonsToChange) != 0: \n # self.createInspectorMatrixFrame()\n for i in self.matrixButtonsToChange: \n self.matrixButtons[i].selectionOff()\n self.matrixButtons[i].setImage()\n elif udalost.widget.cget('text') == 'Change by entry Rule': \n for i, w in enumerate(self.matrixButtons):\n w.setValue(int(CaAPP.BIN_RULE[i]))\n self.matrixButtonsToChange = []\n \n self.label2.configure(text = \"BIN RULE: \" + CaAPP.BIN_RULE)\n self.label3.configure(text = \"INT RULE: \" + str(CaAPP.INT_RULE))\n \n ruleEntryText = self.ruleEntry.get() \n print (ruleEntryText)\n self.ruleEntry.delete(0, len(ruleEntryText)) \n self.ruleEntry.insert(0,CaAPP.ALPHA_NUM_RULE)\n self.updateCanvas()\n self.initialImageMatrix = self.createInitialImageMatrix()\n self.createInitialImage()\n # print (\"self.INT_RULE\")\n # print (self.INT_RULE)\n\n def updateCanvas(self): \n self.caCanvas.delete(\"all\")\n yOffset = 50\n rImage = self.caPILimgs[8].resize((200, 200))\n self.resizedImage = ImageTk.PhotoImage(rImage)\n myCaimage = self.caIMGs[5]\n self.pImage = self.caIMGs[0]\n self.caIMGs = []\n for i, image in enumerate(self.caPILimgs):\n imgXsize = image.size[0]\n #print (\"imgXSize = {0}, resized = {1}\".format(imgXsize, int(imgXsize * self.caCanvasXscale)))\n #print (self.caCanvasXscale)\n imgYsize = image.size[1]\n rImage = image.resize((int(imgXsize * self.caCanvasXscale), int(imgYsize * self.caCanvasYscale)))\n self.resizedCAImage = 
ImageTk.PhotoImage(rImage)\r\n            self.caIMGs.append(self.resizedCAImage)\r\n            self.caCanvas.create_image((i % self.caCols) * self.resizedCAImage.width(), (i / self.caCols) * self.resizedCAImage.height(), image = self.caIMGs[i], anchor = tk.NW)\r\n            \r\n            # #imgXsize = image.size[0]\r\n            # #imgYsize = image.size[1]\r\n            # #resImg = image.resize((int(imgXsize * self.caCanvasXscale), int(imgYsize * self.caCanvasYscale)), Image.ANTIALIAS)\r\n            # self.resImg = self.caPILimgs[i].resize((int(self.caCanvasXscale), int(self.caCanvasYscale)))\r\n            # #pImage = ImageTk.PhotoImage(image)\r\n            # self.pImage = ImageTk.PhotoImage(self.resImg)\r\n            # print (\"photoImage width - {w} - height {h}\".format(w = self.pImage.width(), h = self.pImage.height()))\r\n            # print (self.pImage)\r\n            # #self.caCanvas.create_image((i % self.caCols)*self.pImage.width(), yOffset + ((i / self.caCols) * self.pImage.height()) , image = self.pImage, anchor = tk.NW)\r\n\r\n\r\n    def caCanvasZoom(self, udalost):\r\n        print (udalost.delta)\r\n        if udalost.delta>0: print (\"ZOOM IN!\")\r\n        elif udalost.delta<0: print (\"ZOOM OUT!\")\r\n    \r\n    def caCanvasGrab(self, udalost):\r\n        self._y = udalost.y\r\n        self._x = udalost.x\r\n    \r\n    def caCanvasDrag(self, udalost):\r\n        if (self._y-udalost.y < 0): self.caCanvas.yview(\"scroll\",-1,\"units\")\r\n        elif (self._y-udalost.y > 0): self.caCanvas.yview(\"scroll\",1,\"units\")\r\n        if (self._x-udalost.x < 0): self.caCanvas.xview(\"scroll\",-1,\"units\")\r\n        elif (self._x-udalost.x > 0): self.caCanvas.xview(\"scroll\",1,\"units\")\r\n        self._x = udalost.x\r\n        self._y = udalost.y\r\n    \r\n    def updateCaLaycountSB(self, udalost): \r\n        self.caLayCount = int(udalost.widget.get())\r\n        print (type(self.caLayCount))\r\n        print (self.caLayCount)\r\n        print (\"self.caLayCount was updated\")\r\n        print (int(udalost.widget.winfo_geometry().split(\"x\")[0]))\r\n\r\n    def zoomIn(self, udalost):\r\n        #self.caCanvas.delete(\"all\")\r\n        self.caCanvasXscale += self.caCanvasScaleInc\r\n        self.caCanvasYscale += self.caCanvasScaleInc\r\n        print (\"zoomIn->self.caCanvasXscale = \" + str(self.caCanvasXscale))\r\n        self.caCanvas.scale(\"all\", 0, 0, 1 + self.caCanvasScaleInc, 1 + self.caCanvasScaleInc)\r\n        self.updateCanvas()\r\n        #self.updateCanvas()\r\n    \r\n    def zoomOut(self, udalost):\r\n        #self.caCanvas.delete(\"all\")\r\n        self.caCanvasXscale = max(self.caCanvasXscale - self.caCanvasScaleInc, 1)\r\n        self.caCanvasYscale = max(self.caCanvasYscale - self.caCanvasScaleInc, 1)\r\n        self.caCanvas.scale(\"all\", 0,0, 1 - self.caCanvasScaleInc, 1 - self.caCanvasScaleInc)\r\n        self.updateCanvas()\r\n        #updateCanvas()\r\n\r\n    def getNewRule(self):\r\n        returnRule = \"\"\r\n        for i in self.matrixButtons:\r\n            returnRule += str(i.value)\r\n        # print (\"getNewRule() - returnRule\" + str(int(returnRule,2)))\r\n        # print (\"getNewRule() - BIN_RULE\" + str(int(self.BIN_RULE,2)))\r\n        return returnRule\r\n\r\n    def switchRuleValues(self, inRule):\r\n        \r\n        if isinstance(inRule, str):  # types.StringType existed only in Python 2\r\n            returnRule = \"\"\r\n            # print (\"type = StringType\")\r\n            for i, v in enumerate(inRule):\r\n                if i in self.matrixButtonsToChange:\r\n                    if v==\"0\":\r\n                        returnRule += \"1\"\r\n                    elif v == \"1\":\r\n                        returnRule += \"0\"\r\n                else:\r\n                    returnRule += v\r\n            return returnRule\r\n        \r\n        elif isinstance(inRule, list):  # types.ListType existed only in Python 2\r\n            returnRule = inRule\r\n            returnStrRule = \"\"\r\n            # print (\"type = ListType\")\r\n            for j in self.matrixButtonsToChange:\r\n                returnRule[j] = (inRule[j]-1) * (-1)\r\n            # join() returns a new string, so keep the result (and stringify the int flags)\r\n            returnStrRule = \"\".join(str(v) for v in returnRule)\r\n            return returnStrRule\r\n        \r\n    def getIntFromInitialImg(self):\r\n        binVal = \"\"\r\n        sPos = 0\r\n        ePos = 0\r\n        zeroSpos = False\r\n        for i, w in enumerate(self.initialMatrixButtons):\r\n            binVal += str(w.getValue())\r\n            if w.getValue() == 1 and i == 0:\r\n                print 
(\"getHexFromInitialImg - i:\")\n print (i)\n sPos = i\n zeroSpos = True\n ePos = i\n elif w.getValue() == 1 and sPos == 0 and zeroSpos == False:\n sPos = i\n ePos = i\n elif w.getValue() == 1 and i!= 0:\n ePos = i\n\n print (\"sPos\")\n print (sPos)\n print (\"ePos\")\n print (ePos)\n print (binVal[sPos:ePos])\n if sPos == len(binVal) - 1:\n sPos = len(binVal) - 2\n if ePos == len(binVal) - 1:\n ePos = len(binVal) - 2\n intVal = int(binVal[sPos:((ePos + 1) % len(binVal))],2)\n print (\"intVal\")\n print (intVal)\n hexVal = \"{0:x}\".format(intVal) \n print (\"hexVal\")\n print (hexVal)\n return intVal\n\n def shortenName(self,inMaxLength, inStr):\n dirPathLength = len(os.getcwd())\n dirLength = len(inStr)\n newDirLength = inMaxLength - dirPathLength\n dirLengthDifference = dirLength - newDirLength\n myStrList = [x for x in inStr]\n if dirLengthDifference > 0:\n popStep = dirLength / dirLengthDifference\n mySliceList = range(0,len(myStrList), popStep)\n for i, v in enumerate(mySliceList):\n myStrList.pop(v-i)\n return \"\".join(myStrList)\n\n def writeFile(self, inData, inFullName):\n with open(inFullName, 'w') as myFile:\n myFile.write(inData)\n\n def createSCADconvex(self, inData, inWrightDirPath, inWidth, inHeight):\n #splitedPath = inDirName.split(\"/\")\n #print splitedPath[len(splitedPath)-1]\n returnString = \"\\n\"\n returnString += \"sphereRadius = 0.1; \\n\"\n returnString += \"wallWidth = 4; \\n\"\n returnString += \"$fn = 2; \\n\"\n returnString += \"zStart = 0;\\n\"\n returnString += \"zMax = 60;\\n\"\n returnString += \"\\n\"\n returnString += \"//for(z=[zStart:(len(myBoolMx)-1)]) \\n\"\n returnString += \"for(z=[zStart: (len(myBoolMx)-1) < zMax ? (len(myBoolMx)-1) : zMax])\\n\"\n returnString += \"{ \\n\"\n returnString += \" for(y=[0:(len(myBoolMx[z])-1)]) \\n\"\n returnString += \" { \\n\"\n returnString += \" for(x=[0: len(myBoolMx[z][y])-1]) \\n\"\n returnString += \" { \\n\"\n returnString += \"//substracted void from inner structure by wallWidth parameter \\n\"\n returnString += \" if(z(len(myBoolMx[z]) - (wallWidth +2)) || (x < wallWidth || x>(len(myBoolMx[z][y]) - (wallWidth + 2)))))) \\n\"\n returnString += \"//substracted void from surface offseted by wallWidth parameter \\n\"\n returnString += \"// if(zwallWidth + 1 && y<(len(myBoolMx[z]) - (wallWidth + 2)) && (x > wallWidth + 1 && x<(len(myBoolMx[z][y]) - (wallWidth + 2)))))) \\n\"\n returnString += \"//middle cross section XZ plane\\n\"\n returnString += \"// if(zlen(myBoolMx[z][y])-x && x<(len(myBoolMx[z][y])))\\n\"\n returnString += \"//diagonal section 2\\n\"\n returnString += \"// if(zy)\\n\"\n returnString += \"//full volume structure\\n\"\n returnString += \"// if(z 0) {color(\\\"blue\\\") translate(point1) sphere(sphereRadius);}\\n\"\n returnString += \" if(len(point2) > 0) {color(\\\"blue\\\") translate(point2) sphere(sphereRadius);}\\n\"\n returnString += \" if(len(point3) > 0) {color(\\\"blue\\\") translate(point3) sphere(sphereRadius);}\\n\"\n returnString += \" if(len(point4) > 0) {color(\\\"blue\\\") translate(point4) sphere(sphereRadius);}\\n\" \n returnString += \" if(len(point5) > 0) {color(\\\"blue\\\") translate(point5) sphere(sphereRadius);}\\n\" \n returnString += \" if(len(point6) > 0) {color(\\\"blue\\\") translate(point6) sphere(sphereRadius);}\\n\"\n returnString += \" if(len(point7) > 0) {color(\\\"blue\\\") translate(point7) sphere(sphereRadius);}\\n\"\n returnString += \" if(len(point8) > 0) {color(\\\"blue\\\") translate(point8) sphere(sphereRadius);}\\n\" \n returnString += \" 
}\\n\"\n returnString += \" }\\n\"\n returnString += \" }\\n\"\n returnString += \" }\\n\"\n returnString += \"}\\n\"\n returnString += \"\\n\"\n returnString += \"myBoolMx = [\"\n for b, z in enumerate(inData):\n returnString += \"[\"\n layer = z.getdata()\n for a, y in enumerate(layer):\n if a%inWidth == 0 and a == 0:\n returnString += \"[\" + str(returnOne(y[0])) + \", \"\n elif a%inWidth == 0 and a > 0:\n returnString += \"], [\" + str(returnOne(y[0])) + \", \"\n elif a%inWidth == inWidth - 1 and a > 0:\n returnString += str(returnOne(y[0]))\n else:\n returnString += str(returnOne(y[0])) + \", \"\n if a == len(layer)-1:\n returnString += \"]\"\n # returnString += \"-\" + str(a) + \"-\"\n if b != len(inData)-1:\n returnString += \"],\"\n else:\n returnString += \"]\"\n returnString += \"]; \\n\"\n \n #fileName = splitedPath[len(splitedPath)-1] + \"x\" + str(len(inData)) +\"_convexHull.scad\"\n fileName = \"{0}x{1}_{2}_convexHull.scad\".format(inWidth, inHeight, len(inData))\n fullName = inWrightDirPath + \"\\\\\" + fileName\n self.writeFile(returnString, fullName)\n print (\"saved :\" + fullName)\n return returnString\n \n def createTextForSCAD(self, inData, inWrightDirPath, inWidth, inHeight):\n #splitedPath = inDirName.split(\"/\")\n #print splitedPath[len(splitedPath)-1]\n returnString = \" \\n\"\n returnString += \"wallWidth = 4;\\n\" \n returnString += \"zStart = 0;\\n\"\n returnString += \"zMax = 60;\\n\"\n returnString += \"for(z=[zStart: (len(myBoolMx)-1) < zMax ? (len(myBoolMx)-1) : zMax]) \\n\"\n returnString += \"{\\n\"\n returnString += \" for(y=[0:(len(myBoolMx[z])-1)]) \\n\"\n returnString += \" { \\n\"\n returnString += \" for(x=[0:(len(myBoolMx[z][y])-1)]) \\n\"\n returnString += \" { \\n\"\n returnString += \"//substracted void from inner structure offseted by wallWidth parameter \\n\"\n returnString += \" if(myBoolMx[z][y][x] == 1 && (z(len(myBoolMx[z]) - (wallWidth + 2)) || (x < wallWidth + 1 || x>(len(myBoolMx[z][y]) - (wallWidth + 2))))))) \\n\"\n returnString += \"//substracted void from surface offseted by wallWidth parameter \\n\"\n returnString += \"// if(myBoolMx[z][y][x] == 1 && ((y>wallWidth + 1 && y<(len(myBoolMx[z]) - (wallWidth + 2)) && (x > wallWidth + 1 && x<(len(myBoolMx[z][y]) - (wallWidth + 2)))))) \\n\"\n returnString += \"//middle cross section XZ plane\\n\"\n returnString += \"// if(myBoolMx[z][y][x] == 1 && (z(len(myBoolMx[z][y])-x)) && x<(len(myBoolMx[z][y])))\\n\"\n returnString += \"//diagonal section 2\\n\"\n returnString += \"// if(myBoolMx[z][y][x] == 1 && (zy))\\n\"\n returnString += \"//full volume structure\\n\"\n returnString += \"// if(myBoolMx[z][y][x] == 1) \\n\"\n returnString += \" { \\n\"\n returnString += \" translate([x,y,z])\\n\"\n returnString += \" { \\n\"\n returnString += \" cube(1.2); \\n\"\n returnString += \" } \\n\"\n returnString += \" } \\n\"\n returnString += \" } \\n\"\n returnString += \" } \\n\"\n returnString += \"} \\n\"\n returnString += \" \\n\"\n returnString += \"myBoolMx = [\"\n for b, z in enumerate(inData):\n returnString += \"[\"\n layer = z.getdata()\n for a, y in enumerate(layer):\n if a%inWidth == 0 and a == 0:\n returnString += \"[\" + str(returnOne(y[0])) + \", \"\n elif a%inWidth == 0 and a > 0:\n returnString += \"], [\" + str(returnOne(y[0])) + \", \"\n elif a%inWidth == inWidth - 1 and a > 0:\n returnString += str(returnOne(y[0]))\n else:\n returnString += str(returnOne(y[0])) + \", \"\n if a == len(layer)-1:\n returnString += \"]\"\n # returnString += \"-\" + str(a) + \"-\"\n if b != 
len(inData)-1:\n returnString += \"],\"\n else:\n returnString += \"]\"\n returnString += \"]; \\n\"\n #fileName = splitedPath[len(splitedPath)-1] + \"x\" + str(len(inData)) +\"_cube.scad\"\n fileName = \"{0}x{1}_{2}_cube.scad\".format(inWidth, inHeight, len(inData))\n fullName = inWrightDirPath + \"\\\\\" + fileName\n self.writeFile(returnString, fullName)\n print (\"saved :\" + fullName)\n return returnString\n\n def saveCAimages(self):\n ensure_dir(self.saveCAimagesPath + CaAPP.ALPHA_NUM_RULE + \"/\")\n os.chdir(self.saveCAimagesPath + CaAPP.ALPHA_NUM_RULE + \"/\")\n# print (\"saveCAimages -> inOption - {0}\".format(\"blablabla\"))\n# saveDir = self.HEX_RULE + \"/\"\n# # saveDir = self.saveCAimagesPath + base_repr(self.INT_RULE, 36) + \"/\"\n# # subDir = saveDir + \"/\" + base_repr(self.getIntFromInitialImg(), 36) + \"/\"\n# subDir = u\"\" + base_repr(self.getIntFromInitialImg(), 36)\n# print (subDir)\n# try:\n# ensure_dir(saveDir)\n# except:\n# print (\"Error creating or reading directory: {0}\".format(saveDir))\n# os.chdir(saveDir)\n \n mySubDirs = []\n for (path, dirnames, filenames) in os.walk(os.getcwd()): \n mySubDirs.extend(dirnames)\n subDir = \"{:03}\".format(int(mySubDirs[-1]) + 1 if len(mySubDirs) > 0 else 1)\n print (\"Name of subdir is : {0}\".format(subDir))\n print (mySubDirs)\n print (\"Length of path is : {0} - {1}\".format(len(os.getcwd() + subDir), os.getcwd() + \"\\\\\" + subDir))\n if len(os.getcwd() + subDir) > 260:\n subDir = self.shortenName(260, subDir)\n print (\"Length of path after shortening is : {0} - {1}\".format(len(os.getcwd() + subDir), os.getcwd() + \"\\\\\" + subDir))\n #try:\n print (subDir + \"\\\\\")\n try:\n ensure_dir(subDir + \"\\\\\")\n #os.chdir(subDir + \"/\")\n \n for i, img in enumerate(self.caPILimgs):\n # img.save(subDir + \"{:0>3}\".format(str(i)) + \".png\")\n img.save(subDir + \"\\\\\" + \"{:0>3}\".format(str(i)) + \".png\")\n print (\"Length of path is : {0} - {1}\".format(len(os.getcwd() + \"\\\\\" + subDir), os.getcwd() + \"\\\\\" + subDir))\n print (\"Files were saved into directory : {0}\".format(os.getcwd() + \"\\\\\" + subDir))\n except Exception as ex: \n print (\"Error creating or reading directory: {0}\".format(os.getcwd() + \"\\\\\" + subDir))\n #try:\n self.createTextForSCAD(self.caPILimgs, os.getcwd() + \"\\\\\" + subDir, self.caResX, self.caResY)\n self.createSCADconvex(self.caPILimgs, os.getcwd() + \"\\\\\" + subDir, self.caResX, self.caResY)\n # except Exception as ex:\n # error_type, error_instance, traceback = sys.exc_info()\n # print(\"{0} \\\n # Exception: {1} error_type: {2}, error_instance {3}, traceback -{4}\" \\\n # .format(\"Error creating SCAD data for: {0}\".format(os.getcwd() + \"\\\\\" + subDir) \\\n # ,ex \\\n # ,error_type \\\n # ,error_instance \\\n # ,traceback))\n \n\n def saveOddCAimages(self):\n os.chdir(self.saveCAimagesPath)\n saveDir = self.HEX_RULE + \"/\"\n# saveDir = self.saveCAimagesPath + base_repr(self.INT_RULE, 36) + \"/\"\n# subDir = saveDir + \"/\" + base_repr(self.getIntFromInitialImg(), 36) + \"/\"\n subDir = u\"\" + base_repr(self.getIntFromInitialImg(), 36) + \"-n/\"\n print (subDir)\n try:\n ensure_dir(saveDir)\n except:\n print (\"Error creating or reading directory: {0}\".format(saveDir))\n os.chdir(saveDir)\n try:\n ensure_dir(subDir)\n os.chdir(subDir)\n for i, img in enumerate(self.caPILimgs):\n # img.save(subDir + \"{:0>3}\".format(str(i)) + \".png\")\n if i % 2 != 0:\n img.save(\"{:0>3}\".format(str(i)) + \".png\")\n except:\n print (\"Error creating or reading directory: 
{0}\".format(subDir))\n \n \n def saveEvenCAimages(self):\n os.chdir(self.saveCAimagesPath)\n saveDir = self.HEX_RULE + \"/\"\n# saveDir = self.saveCAimagesPath + base_repr(self.INT_RULE, 36) + \"/\"\n# subDir = saveDir + \"/\" + base_repr(self.getIntFromInitialImg(), 36) + \"/\"\n subDir = u\"\" + base_repr(self.getIntFromInitialImg(), 36) + \"-p/\"\n print (subDir)\n try:\n ensure_dir(saveDir)\n except:\n print (\"Error creating or reading directory: {0}\".format(saveDir))\n os.chdir(saveDir)\n try:\n ensure_dir(subDir)\n os.chdir(subDir)\n for i, img in enumerate(self.caPILimgs):\n # img.save(subDir + \"{:0>3}\".format(str(i)) + \".png\")\n if i % 2 == 0:\n img.save(\"{:0>3}\".format(str(i)) + \".png\")\n except:\n print (\"Error creating or reading directory: {0}\".format(subDir))\n \n \n\n # def createMenu(self):\n # self.menuCanvas = tk.Canvas(self, bg = \"gray30\", bd = 0)\n # self.menuCanvas.grid(row=0, column=0, columnspan = 4, sticky=tk.E+tk.W+tk.N)\n # self.menuFrame = tk.Frame(self.menuCanvas)\n # self.menuFrame.grid()\n # self.menuLabel = tk.Label(self.menuFrame, text = \"HERE GONNA BE MENU BAR\")\n # self.menuLabel.grid()\n\n def createMenuBar(self):\n #self.fileMenu = tk.Menu(self)\n\n top = self.winfo_toplevel()\n print (\"createMenuBar -> self.winfo_toplevel() - {0}\".format(top[\"height\"]))\n self.menuBar = tk.Menu(top)\n top['menu'] = self.menuBar\n self.fileMenu = tk.Menu(self.menuBar, tearoff = 0)\n self.fileMenu.add_command(label='Save all CA images', command=self.saveCAimages)\n self.fileMenu.add_command(label='Save Odd CA images', command=self.saveOddCAimages)\n self.fileMenu.add_command(label='Save Even CA images', command=self.saveEvenCAimages)\n self.editMenu = tk.Menu(self.menuBar, tearoff = 0)\n self.editMenu.add_command(label='Settings')\n self.aboutMenu = tk.Menu(self.menuBar, tearoff = 0)\n self.aboutMenu.add_command(label='About')\n self.menuBar.add_cascade(label='File', menu=self.fileMenu)\n self.menuBar.add_cascade(label='Edit', menu=self.editMenu)\n self.menuBar.add_cascade(label='Help', menu=self.aboutMenu) \n print (\"createMenuBar -> top.event_info() - {0}\".format(top.event_info()))\n\n def createInspectorMatrixFrame(self):\n# self.inspectorMatrixFrame = ttk.LabelFrame(self, text = \"Inspector matrix combinations\")\n# self.inspectorMatrixFrame.grid()\n self.leftCanvas = tk.Canvas(self, bg = \"orange\", bd = 0, width = 200 , height = 50, relief = \"flat\")\n self.leftCanvas.pack(side=LEFT, fill=BOTH)\n self.rightCanvas = tk.Canvas(self, bg = \"magenta\", bd = 0, width = 200 , height = 50, relief = \"flat\")\n self.rightCanvas.pack(side=RIGHT, fill=BOTH)\n self.headCanvas = tk.Canvas(self.leftCanvas, bg = \"gray30\", bd = 0, width = 200 , height = 50, relief = \"flat\")\n #self.headCanvas.grid(row=0, column=0, sticky=tk.N)\n #self.headCanvas.grid_propagate(0)\n self.headCanvas.pack(fill=BOTH, expand=1)\n\n print([\"\\n{0} > {1}\".format(k, v) for k, v in self.headCanvas.configure().items()])\n\n self.headFrame = tk.Frame(self.headCanvas, bg =\"cyan\" , bd = 0, relief = \"flat\")\n self.headFrame.grid(row = 0, column = 0, sticky=tk.E+tk.S)\n #self.headFrame.pack(fill=BOTH, expand=1)\n # self.xButton = tk.Button(self.headFrame, text = \"hanges\", fg = self.text1Color, bg = self.bgColor)\n # self.xButton.bind(\"\", self.updateRule)\n # self.xButton.grid(row = 0, column = 0, sticky = E+W+N+S)\n \n self.label2 = tk.Label(self.headFrame, text = \"BIN RULE: \" + CaAPP.BIN_RULE, justify = \"left\", wraplength=self.width*2/3, fg = self.text1Color, bg = 
self.bgColor)\n self.label2.grid(sticky = W+E)\n \n self.label3 = tk.Label(self.headFrame, text = \"INT RULE: \" + str(CaAPP.INT_RULE), justify = \"left\", wraplength=self.width*2/3, fg = self.text1Color, bg = self.bgColor)\n self.label3.grid(sticky = W+E)\n \n self.headCanvas2 = tk.Canvas(self.leftCanvas, bg = \"red\", bd = 0, relief = \"flat\")\n #self.headCanvas2.grid(row=1, column=0, sticky=tk.E+tk.W+tk.N)\n self.headCanvas2.pack(fill=BOTH, expand=1)\n\n self.headFrame2 = tk.Frame(self.headCanvas2,bg = \"blue\", bd = 0)\n self.headFrame2.grid(sticky = E)\n\n self.label1 = tk.Label(self.headFrame2, text = \"ALPHA NUMERIC RULE: \", justify = \"left\", wraplength=self.width*int(2/3.5), fg = self.text1Color, bg = self.bgColor)\n self.label1.grid(row = 0, column = 0, sticky = W)\n\n self.submitMatrixButton = tk.Button(self.headFrame2, text = \"Submit by button changes\", fg = self.text1Color, bg = self.bgColor)\n self.submitMatrixButton.bind(\"\", self.updateRule)\n self.submitMatrixButton.grid(row = 0, column = 1, sticky = E)\n\n self.ruleEntry = tk.Entry(self.headFrame2, relief = \"flat\", textvariable = self.ruleStrVar, width = int(self.width*1/16), fg = \"black\", bg = \"white\")\n self.ruleEntry.grid(row = 0, column = 2, sticky = W)\n self.ruleEntry.insert(0,CaAPP.ALPHA_NUM_RULE) \n \n\n self.inspectorMatrixCanvas = tk.Canvas(self.leftCanvas, bg = \"gray30\", bd = 0, relief = \"flat\")\n #self.inspectorMatrixCanvas.grid(row=2, column=0, sticky=tk.E+tk.W+tk.N)\n self.inspectorMatrixCanvas.pack(fill=BOTH, expand=1)\n self.inspectorMatrixFrame = tk.Frame(self.inspectorMatrixCanvas, bg = self.bgColor, bd = 0, relief = \"flat\")\n self.inspectorMatrixFrame.grid(sticky = W+E)\n \n self.currentRow += 1\n\n self.buttonImage = PhotoImage(file = r\"C:\\DANO\\_WORK\\PYTHON\\projects\\CA_APP\\cross-on.gif\")\n \n self.matrixButtons = []\n \"\"\"\n create inspector matrix buttons:\n \"\"\"\n \n for i,img in enumerate(self.matrixButtonImgs):\n ruleValue = int(CaAPP.BIN_RULE[i])\n button = MatrixButton(self.inspectorMatrixFrame, self.matrixButtonImgs[i], image=img[ruleValue], bg = self.bgColor, borderwidth = 1, relief = \"flat\", overrelief = \"ridge\", padx = 0, pady = 0)\n button.value = ruleValue\n button.setImage()\n button.bind(\"\", self.addMatrixSelection) \n self.matrixButtons.append(button)\n \"\"\"\n Dislpay inspector matrix buttons\n \"\"\"\n self.inspMatrixRBaction()\n \n self.currentRow = self.currentRow + (len(self.matrixButtons)/self.cols) + 1\n\n def createInitialImageCanvas(self):\n \n self.initialImageControlsCanvas = tk.Canvas(self.rightCanvas, bg = \"gray30\", bd = 0, height = 50)\n self.initialImageControlsCanvas.grid_propagate(0)\n #self.initialImageControlsCanvas.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N, rowspan = 1, columnspan = 3)\n self.initialImageControlsCanvas.pack(fill=BOTH, expand=1)\n \n \n self.initialImageControlsFrame = tk.Frame(self.initialImageControlsCanvas, bg = \"green\", bd = 0)\n self.initialImageControlsFrame.grid(columnspan =1, sticky=tk.W+tk.E+tk.N)\n #self.initialImageControlsFrame.pack(fill=BOTH, expand=1)\n\n self.changeRuleButton = tk.Button(self.initialImageControlsFrame, text = \"Change by entry Rule\", fg = self.text1Color, bg = self.bgColor)\n self.changeRuleButton.bind(\"\", self.updateRule)\n self.changeRuleButton.grid(row = 0, column = 0, columnspan = 1, sticky = W+E)\n\n self.initialImageRBon = tk.Radiobutton(self.initialImageControlsFrame, fg = self.text2Color, bg = self.bgColor,\n indicatoron = 0, text = \"Initial condition from matrix\", 
command=self.radiobuttonAction, variable = self.rbVar, value = 1)\n self.initialImageRBon.grid(row = 1, column = 0)\n\n self.initialImageRBoff = tk.Radiobutton(self.initialImageControlsFrame, fg = self.text2Color, bg = self.bgColor,\n indicatoron = 0, text = \"One pixel initial condition ON\", command=self.radiobuttonAction, variable = self.rbVar, value = 0)\n self.initialImageRBoff.grid(row = 1, column = 1)\n\n self.initialImageCanvas = tk.Canvas(self.rightCanvas, bg = \"gray30\", bd = 0)\n #self.initialImageCanvas.grid(row=1, column=1, sticky=tk.E+tk.W+tk.N+tk.S, columnspan = 3, rowspan = 2)\n self.initialImageCanvas.pack(fill=BOTH, expand=1)\n self.initialImageFrame = tk.Frame(self.initialImageCanvas, bg = \"cyan\", bd = 0)\n self.initialImageFrame.grid(columnspan =1)\n\n \n\n \n self.initialMatrixButtons = []\n \n imageOn = ImageTk.PhotoImage(Image.new(\"RGBA\",(self.initButtonSize, self.initButtonSize), (255, 255, 255, 255)))\n imageOff = ImageTk.PhotoImage(Image.new(\"RGBA\",(self.initButtonSize, self.initButtonSize), (0, 0, 0,255)))\n for x in range(0, self.intialMatrixWidth * self.intialMatrixWidth):\n button = InitialMatrixButton(self.initialImageFrame, (imageOff, imageOn), image=imageOff, \n width = self.initButtonSize, height=self.initButtonSize, bg = self.bgColor, borderwidth = 0, \n relief = \"flat\", overrelief = \"ridge\", padx = 0, pady = 0)\n button.value = 0\n button.setImage()\n button.bind(\"\", self.addInitialMatrixSelection) \n self.initialMatrixButtons.append(button)\n for i,img in enumerate(self.initialMatrixButtons):\n img.grid(column=i%self.intialMatrixWidth, row = (int(i/self.intialMatrixWidth)), padx=0, pady = 0)\n\n def create_CA_Canvas(self):\n self.generateCAimgs()\n self.buttonCanvas = tk.Canvas(self.leftCanvas, height = 50, bg = \"gray30\", bd = 0)\n #self.buttonCanvas.grid(row=3, column=0, sticky=tk.E+tk.W+tk.N)\n self.buttonCanvas.pack(fill=BOTH, expand=1)\n self.buttonFrame = tk.Frame(self.buttonCanvas, bd = 0)\n self.buttonFrame.grid(column = 0, row = 1, sticky = E+W)\n\n# self.testLabel = tk.Label(self.buttonFrame, text = \"BIN RULE: \" + CaAPP.BIN_RULE, justify = \"left\", wraplength=self.width-10, fg = self.text1Color, bg = self.bgColor)\n# self.testLabel.grid(sticky = W)\n\n self.caLayCountSbLabel = tk.Label(self.buttonFrame, text = \"CA Layers Count: \", justify = \"left\", fg = self.text1Color, bg = self.bgColor)\n self.caLayCountSbLabel.grid(column = 2, row = 0, sticky = W)\n \n\n self.caLayCountSB = tk.Spinbox(self.buttonFrame, text=\"CA Layers Count\", relief = \"flat\", bd = 0, fg = self.text1Color, bg = self.bgColor, from_=10, to=300)\n self.caLayCountSB.bind(\"\", self.updateCaLaycountSB)\n self.caLayCountSB.bind(\"\", self.updateCaLaycountSB)\n self.caLayCountSB.grid(column = 3, row = 0, sticky = W)\n\n\n\n\n\n self.inspMatrixButtonsOrderRBon = tk.Radiobutton(self.buttonFrame, fg = self.text2Color, bg = self.bgColor,\n indicatoron = 0, text = \"Ordered sequence\", command=self.inspMatrixRBaction, variable = self.imRbVar, value = 0)\n self.inspMatrixButtonsOrderRBon.grid(row = 0, column = 5)\n\n self.inspMatrixButtonsOrderRBoff = tk.Radiobutton(self.buttonFrame, fg = self.text2Color, bg = self.bgColor,\n indicatoron = 0, text = \"Grouped symetries\", command=self.inspMatrixRBaction, variable = self.imRbVar, value = 1)\n self.inspMatrixButtonsOrderRBoff.grid(row = 0, column = 6)\n\n self.inspMatrixButtonsOrderRBasym = tk.Radiobutton(self.buttonFrame, fg = self.text2Color, bg = self.bgColor,\n indicatoron = 0, text = \"Grouped asymetries\", 
command=self.inspMatrixRBaction, variable = self.imRbVar, value = 2)\n self.inspMatrixButtonsOrderRBasym.grid(row = 0, column = 8)\n\n self.inspMatrixButtonsOrder2axisSym = tk.Radiobutton(self.buttonFrame, fg = self.text2Color, bg = self.bgColor,\n indicatoron = 0, text = \"Grouped 2 and 4 axis symetries\", command=self.inspMatrixRBaction, variable = self.imRbVar, value = 3)\n self.inspMatrixButtonsOrder2axisSym.grid(row = 0, column = 7)\n\n \n\n # self.saveImgsButton = tk.Button(self.buttonFrame, text = \"Save CA images\", fg = self.text1Color, bg = self.bgColor)\n # self.saveImgsButton.bind(\"\", self.saveCAimages)\n # self.saveImgsButton.grid(row = 0, column = 10, sticky = E)\n # (print \"create_CA_Canvas -> self.saveImgsButton.event_info() - {0}\".format(self.saveImgsButton.event_info(virtual=None)))\n\n\n self.currentRow += 1\n\n self.caCanvas = tk.Canvas(self.leftCanvas, bg = \"gray40\")\n self.caCanvas.bind(\"<MouseWheel>\", self.caCanvasZoom)\n self.caCanvas.bind(\"<ButtonPress-1>\", self.caCanvasGrab)\n self.caCanvas.bind(\"<B1-Motion>\", self.caCanvasDrag)\n #self.caCanvas.grid(row=4, column=0, sticky=tk.E+tk.W+tk.N, columnspan = 4)\n self.caCanvas.pack(side=LEFT, expand=1)\n \n self.caCanvas.addtag_all(\"CA\")\n self.caCanvas.config(scrollregion=self.caCanvas.bbox(ALL))\n\n self.zoomInButton = tk.Button(self.buttonFrame, text = \"+\", fg = self.text1Color, bg = self.bgColor)\n self.zoomInButton.bind(\"<Button-1>\", self.zoomIn)\n self.zoomInButton.grid(row = 0, column = 0, sticky = W)\n\n self.zoomOutButton = tk.Button(self.buttonFrame, text = \"-\", fg = self.text1Color, bg = self.bgColor)\n self.zoomOutButton.bind(\"<Button-1>\", self.zoomOut)\n self.zoomOutButton.grid(row = 0, column = 1, sticky = W)\n \n \n# self.caCanvas.itemconfigure(myCanvWinID, window = )\n\n \n self.scrollY = tk.Scrollbar(self.leftCanvas, orient=tk.VERTICAL, bg = \"black\", command=self.caCanvas.yview)\n #self.scrollY.grid(row=4, column=3, sticky=tk.N+tk.S+tk.E)\n self.scrollY.pack(fill=Y,side=LEFT, expand=1)\n self.currentRow += 1\n self.scrollX = tk.Scrollbar(self.leftCanvas, orient=tk.HORIZONTAL, command=self.caCanvas.xview)\n #self.scrollX.grid(row=5, column=0, sticky=tk.E+tk.W)\n self.scrollX.pack(fill=BOTH, expand=1)\n self.currentRow += 1\n \n def radiobuttonAction(self):\n print (self.rbVar.get())\n myConfig = self.initialMatrixButtons[0].config()\n initialBGcolor = myConfig['background'][len(myConfig['background'])-1]\n print (\"len(initialBGcolor)\")\n print (initialBGcolor)\n for i in self.initialMatrixButtons:\n if self.rbVar.get() == 0:\n i.config(state = DISABLED)\n i.config(bg = initialBGcolor)\n i.setValue(0)\n\n else:\n i.config(state = NORMAL)\n pass\n\n def inspMatrixRBaction(self):\n for i in self.matrixButtons:\n i.grid_forget()\n print (self.imRbVar.get())\n if self.imRbVar.get() == 0:\n self.gridOrderedSequence()\n elif self.imRbVar.get() == 1:\n self.gridBySymGroups()\n elif self.imRbVar.get() == 2:\n self.gridByAsymGroups()\n elif self.imRbVar.get() == 3:\n self.gridBy2axisSymetryGroups()\n\n def gridOrderedSequence(self):\n for i,but in enumerate(self.matrixButtons):\n but.grid(column=i%self.cols, row = int(i/self.cols), padx=0, pady = 0) \n but.configure(relief = \"flat\")\n\n def gridBySymGroups(self):\n self.symetryOneAxisGroups = getSymetryOneAxisGroups()\n self.buttonFramesLev1 = []\n print (\"matrixButton.winfo_name() - {c}\".format(c = self.matrixButtons[0].winfo_name()))\n for i, lev1 in enumerate(self.symetryOneAxisGroups):\n self.buttonFrameLev1 = tk.Frame(self.buttonFrame, bd = 0)\n self.buttonFrameLev1.grid(row = i)\n 
self.buttonFramesLev1.append(self.buttonFrameLev1) \n for j, lev2 in enumerate(lev1):\n print(lev2)\n print (\"level 1 count - {count1}; level 2 count - {count2}\".format(count1 = i, count2 = j))\n for b, pos in enumerate(lev2):\n myButton = self.matrixButtons[lev2[b]]\n myButton.grid(column = b+(len(lev2)*j), row = i)\n if j % 2 == 0:\n myButton.configure(relief = \"groove\")\n else:\n myButton.configure(relief = \"flat\")\n pass\n\n def gridBy2axisSymetryGroups(self):\n self.symetryTwoAxisGroups = getSym2axisGroup()\n self.buttonFramesLev1 = []\n for i, lev1 in enumerate(self.symetryTwoAxisGroups):\n self.buttonFrameLev1 = tk.Frame(self.buttonFrame, bd = 0)\n self.buttonFrameLev1.grid(row = i)\n self.buttonFramesLev1.append(self.buttonFrameLev1) \n for j, lev2 in enumerate(lev1):\n print(lev2)\n print (\"level 1 count - {count1}; level 2 count - {count2}\".format(count1 = i, count2 = j))\n for b, pos in enumerate(lev2):\n myButton = self.matrixButtons[lev2[b]]\n myButton.grid(column = b+(len(lev2)*j), row = i)\n if j % 2 == 0:\n myButton.configure(relief = \"groove\")\n else:\n myButton.configure(relief = \"flat\")\n pass\n\n def gridByAsymGroups(self):\n self.asymetryGroups = getAsymetryGroups()\n self.buttonFramesLev1 = []\n print (\"matrixButton.winfo_name() - {c}\".format(c = self.matrixButtons[0].winfo_name()))\n for i, lev1 in enumerate(self.asymetryGroups):\n self.buttonFrameLev1 = tk.Frame(self.buttonFrame, bd = 0)\n self.buttonFrameLev1.grid(row = i)\n self.buttonFramesLev1.append(self.buttonFrameLev1) \n for j, lev2 in enumerate(lev1):\n print(lev2)\n print (\"level 1 count - {count1}; level 2 count - {count2}\".format(count1 = i, count2 = j))\n for b, pos in enumerate(lev2):\n myButton = self.matrixButtons[lev2[b]]\n myButton.grid(column = b+(len(lev2)*j), row = i)\n if j % 2 == 0:\n myButton.configure(relief = \"groove\")\n else:\n myButton.configure(relief = \"flat\")\n pass\n\n\n\n \n\n#mainWindow.geometry(\"500x500\")\napp = CaAPP(mainWindow)\napp.master.title(\"Celular automaton Application\")\n\n#caCanvas = tk.Canvas(app, height = 650, bg = \"black\", )\n#caCanvas.grid(row=0, column=0, sticky=tk.E+tk.W, columnspan = 50)\n# windowFrame = tk.Frame(app.caCanvas, width = 1000, height = 1000, bg = \"red\", bd = 5)\n# windowFrame.grid()\n# testLabel = tk.Label(windowFrame, text = \"BIN RULE: \", justify = \"left\", fg = 'white', bg = 'black')\n# testLabel.grid(sticky = W, rowspan=30)\n# app.caCanvas.create_image(100, 100 , image = app.caIMGs[29], anchor = tk.NW)\n\napp.mainloop()", "repo_name": "Spaceific-Studio/_WORK", "sub_path": "projects/CA_APP/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 52582, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "97", "api": [{"api_name": "tkinter.Button", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tkinter.Button.__init__", "line_number": 20, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tkinter.N", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tkinter.S", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 74, 
"usage_type": "attribute"}, {"api_name": "tkinter.IntVar", "line_number": 80, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 82, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 84, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 155, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 155, "usage_type": "name"}, {"api_name": "tkinter.NW", "line_number": 158, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 160, "usage_type": "call"}, {"api_name": "tkinter.NW", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.base_repr", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.base_repr", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.base_repr", "line_number": 199, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 223, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 223, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 280, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 280, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 288, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 288, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 289, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 289, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 352, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 352, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 362, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 362, "usage_type": "name"}, {"api_name": "tkinter.NW", "line_number": 364, "usage_type": "attribute"}, {"api_name": "types.StringType", "line_number": 428, "usage_type": "attribute"}, {"api_name": "types.ListType", "line_number": 441, "usage_type": "attribute"}, {"api_name": "numpy.base_repr", "line_number": 710, "usage_type": "call"}, {"api_name": "numpy.base_repr", "line_number": 733, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 765, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 767, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 771, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 773, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 783, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 785, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 787, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 794, "usage_type": "call"}, {"api_name": "tkinter.E", "line_number": 795, "usage_type": "attribute"}, {"api_name": "tkinter.S", "line_number": 795, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 801, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 804, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 807, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 811, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 814, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 817, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 821, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 826, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 829, "usage_type": "call"}, {"api_name": "tkinter.Canvas", 
"line_number": 857, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 863, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 864, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 864, "usage_type": "attribute"}, {"api_name": "tkinter.N", "line_number": 864, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 867, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 871, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 875, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 879, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 882, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 890, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 890, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 890, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 890, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 891, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 891, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 891, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 891, "usage_type": "name"}, {"api_name": "tkinter.Canvas", "line_number": 905, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 908, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 914, "usage_type": "call"}, {"api_name": "tkinter.Spinbox", "line_number": 918, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 927, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 931, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 935, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 939, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 953, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 963, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 967, "usage_type": "call"}, {"api_name": "tkinter.Scrollbar", "line_number": 975, "usage_type": "call"}, {"api_name": "tkinter.VERTICAL", "line_number": 975, "usage_type": "attribute"}, {"api_name": "tkinter.Scrollbar", "line_number": 979, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 979, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 1023, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 1042, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 1062, "usage_type": "call"}]} +{"seq_id": "7964976861", "text": "# coding=utf-8\nimport os\nfrom config import getConfig\nfrom model.util import read_data\nimport jieba\nfrom zhon.hanzi import punctuation\nimport re\nimport sys\nimport time\nimport torch\nimport model.seq2seqModel as seq2seqModel\nfrom torch import optim\n\n\ndef preprocess():\n global MAX_TRAIN_DATA_SIZE\n if not os.path.exists(conv_path):\n exit()\n\n\n convs = [] \n with open(conv_path,encoding='utf-8') as f:\n one_conv = [] \n for line in f:\n line = line.strip('\\n').replace('?', '')\n line=re.sub(r\"[%s]+\" %punctuation, \"\",line)\n if line == '':\n continue\n if line[0] == gConfig['e']:\n if one_conv:\n convs.append(one_conv)\n one_conv = []\n elif line[0] == gConfig['m']:\n one_conv.append(line.split(' ')[1])\n \n seq = [] \n\n for conv in convs:\n if len(conv) == 1:\n continue\n if len(conv) % 2 != 0: \n conv = 
conv[:-1]\n for i in range(len(conv)):\n if i % 2 == 0:\n conv[i]=\" \".join(jieba.cut(conv[i]))\n conv[i+1]=\" \".join(jieba.cut(conv[i+1]))\n seq.append(conv[i]+'\\t'+conv[i+1])\n\n seq_train = open(gConfig['seq_data'],'w') \n\n for i in range(len(seq)):\n seq_train.write(seq[i]+'\\n')\n seq_train.close()\n MAX_TRAIN_DATA_SIZE = len(seq)\n print(MAX_TRAIN_DATA_SIZE, 'processed')\n\n\ndef train():\n print(\"Preparing data in %s\" % gConfig['train_data'])\n steps_per_epoch = len(input_tensor) // gConfig['batch_size']\n print(steps_per_epoch)\n checkpoint_dir = gConfig['weight_dir']\n\n checkpoint_prefix = os.path.join(checkpoint_dir, \"weight.pt\")\n start_time = time.time()\n encoder = seq2seqModel.Encoder(input_lang.n_words, hidden_size).to(device)\n decoder = seq2seqModel.AttentionDencoder(hidden_size, target_lang.n_words, dropout_p=0.1).to(device)\n if os.path.exists(checkpoint_prefix):\n checkpoint = torch.load(checkpoint_prefix)\n encoder.load_state_dict(checkpoint['modelA_state_dict'])\n decoder.load_state_dict(checkpoint['modelB_state_dict'])\n max_data=MAX_TRAIN_DATA_SIZE\n total_loss = current_steps = 0\n batch_loss=1\n while batch_loss>gConfig['min_loss']:\n start_time_epoch = time.time()\n for i in range(1,(max_data//BATCH_SIZE)):\n inp=input_tensor[(i-1)*BATCH_SIZE:i*BATCH_SIZE]\n targ=target_tensor[(i-1)*BATCH_SIZE:i*BATCH_SIZE]\n batch_loss = seq2seqModel.train_step(inp, targ,encoder,decoder,optim.SGD(encoder.parameters(),lr=0.001),optim.SGD(decoder.parameters(),lr=0.01))\n total_loss += batch_loss\n print('Total step:{} loss {:.4f}'.format(i,batch_loss ))\n step_time_epoch = (time.time() - start_time_epoch) / steps_per_epoch\n step_loss = total_loss / steps_per_epoch\n current_steps += steps_per_epoch\n step_time_total = (time.time() - start_time) / current_steps\n print('current_steps: {} step_time_total: {} step_time_epoch: {} batch_loss {:.4f}'.format(current_steps, step_time_total, step_time_epoch,\n batch_loss))\n torch.save({'modelA_state_dict': encoder.state_dict(),\n 'modelB_state_dict': decoder.state_dict()},checkpoint_prefix)\n sys.stdout.flush()\n\n\ngConfig = {}\ngConfig= getConfig.get_config()\nconv_path = gConfig['resource_data']\n\nMAX_TRAIN_DATA_SIZE = 0\n# Train\ngetConfig.remove_config(\"train_output\",\"max_train_data_size\")\npreprocess()\ngetConfig.write_config(\"train_output\",\"max_train_data_size\",str(MAX_TRAIN_DATA_SIZE))\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nunits=gConfig['layer_size']\nBATCH_SIZE=gConfig['batch_size']\nMAX_LENGTH=gConfig['max_length']\nEOS_token = 1\ninput_tensor,input_lang,target_tensor,target_lang= read_data(gConfig['seq_data'], MAX_TRAIN_DATA_SIZE,EOS_token,device)\nhidden_size = 256\nif __name__ == '__main__':\n if len(sys.argv) - 1:\n gConfig = getConfig.get_config(sys.argv[1])\n else:\n gConfig = getConfig.get_config()\n\n train()\n ", "repo_name": "onepiece010938/line_seq2seq_chatbot", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 4184, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 26, "usage_type": "call"}, {"api_name": "zhon.hanzi.punctuation", "line_number": 26, "usage_type": "name"}, {"api_name": "jieba.cut", "line_number": 45, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 46, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}, {"api_name": "model.seq2seqModel.Encoder", "line_number": 66, "usage_type": "call"}, {"api_name": "model.seq2seqModel", "line_number": 66, "usage_type": "name"}, {"api_name": "model.seq2seqModel.AttentionDencoder", "line_number": 67, "usage_type": "call"}, {"api_name": "model.seq2seqModel", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "model.seq2seqModel.train_step", "line_number": 80, "usage_type": "call"}, {"api_name": "model.seq2seqModel", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 80, "usage_type": "name"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 91, "usage_type": "attribute"}, {"api_name": "config.getConfig.get_config", "line_number": 95, "usage_type": "call"}, {"api_name": "config.getConfig", "line_number": 95, "usage_type": "name"}, {"api_name": "config.getConfig.remove_config", "line_number": 100, "usage_type": "call"}, {"api_name": "config.getConfig", "line_number": 100, "usage_type": "name"}, {"api_name": "config.getConfig.write_config", "line_number": 102, "usage_type": "call"}, {"api_name": "config.getConfig", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 104, "usage_type": "attribute"}, {"api_name": "model.util.read_data", "line_number": 109, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 112, "usage_type": "attribute"}, {"api_name": "config.getConfig.get_config", "line_number": 113, "usage_type": "call"}, {"api_name": "config.getConfig", "line_number": 113, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "config.getConfig.get_config", "line_number": 115, "usage_type": "call"}, {"api_name": "config.getConfig", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "32707255143", "text": "import csv\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom recipes.models import Ingredient\n\n\nDATA_ROOT = os.path.join(settings.BASE_DIR, 'data')\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('path', default='ingredients.csv',\n type=str, help='Путь к файлу с данными.')\n\n def handle(self, *args, **options):\n self.stdout.write('Началась загрузка ингредиентов')\n with open(os.path.join(DATA_ROOT, options['path']), newline='',\n encoding='utf-8') as f:\n reader = csv.reader(f)\n for row in reader:\n name, measurement_unit = row\n Ingredient.objects.update_or_create(\n name=name,\n measurement_unit=measurement_unit\n 
)\n self.stdout.write('Загрузка ингредиентов завершена.')\n", "repo_name": "Pavlukov9/foodgram-project-react", "sub_path": "backend/foodgram/recipes/management/commands/load_ingredients.py", "file_name": "load_ingredients.py", "file_ext": "py", "file_size_in_byte": 1020, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.conf.settings.BASE_DIR", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 10, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 23, "usage_type": "call"}, {"api_name": "recipes.models.Ingredient.objects.update_or_create", "line_number": 26, "usage_type": "call"}, {"api_name": "recipes.models.Ingredient.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "recipes.models.Ingredient", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "37412156606", "text": "\"\"\"Unit test for running tree system command\"\"\"\n\nimport logging\nfrom os import chdir\nfrom pathlib import Path\n\nfrom utils.bids.tree import tree_bids\n\n\ndef test_tree_bids_basic_results_works(caplog, tmp_path):\n \"\"\"Make sure tree output actually happens.\"\"\"\n\n caplog.set_level(logging.DEBUG)\n\n bids_path = Path(\"work/bids\")\n\n the_temp_dir = tmp_path / Path(bids_path)\n the_temp_dir.mkdir(parents=True)\n chdir(str(tmp_path))\n\n Path(\"work/bids/adir\").mkdir()\n Path(\"work/bids/adir/anotherfiile.json\").touch()\n Path(\"work/bids/afile.txt\").touch()\n Path(\"work/bids/anotherdir\").mkdir()\n Path(\"work/bids/anotherdir/README.md\").touch()\n\n tree_bids(bids_path, \"tree_out\")\n\n with open(\"tree_out.html\") as tfp:\n html = tfp.read().split(\"\\n\")\n\n assert len(caplog.records) == 2\n assert Path(\"tree_out.html\").exists()\n assert html[10] == \"work/bids/\" # has trailing '/'\n assert html[16] == \"2 directories, 3 files\"\n assert caplog.records[1].message == 'Wrote \"tree_out.html\"'\n\n\ndef test_tree_bids_directory_none_title_extra_work(caplog, tmp_path):\n \"\"\"Test when directory is none, when title and extra are given.\"\"\"\n\n caplog.set_level(logging.DEBUG)\n\n bids_path = Path(\"work/bids\")\n\n the_temp_dir = tmp_path / Path(bids_path)\n the_temp_dir.mkdir(parents=True)\n chdir(str(tmp_path))\n\n Path(\"work/bids/adir\").mkdir()\n Path(\"work/bids/adir/anotherfiile.json\").touch()\n Path(\"work/bids/afile.txt\").touch()\n Path(\"work/bids/anotherdir\").mkdir()\n Path(\"work/bids/anotherdir/README.md\").touch()\n\n tree_bids(None, \"tree_out\", title=\"Bozo\", extra=\"huge shoes\")\n\n with open(\"tree_out.html\") as tfp:\n html = tfp.read().split(\"\\n\")\n\n assert len(caplog.records) == 2\n assert Path(\"tree_out.html\").exists()\n assert html[7] == \"
Bozo
\"\n assert html[10] == \"(unknown)/\"\n assert html[11] == \"0 directories, 0 files\"\n assert html[13] == \"huge shoes\"\n assert caplog.records[1].message == 'Wrote \"tree_out.html\"'\n", "repo_name": "flywheel-apps/bids-fmriprep", "sub_path": "tests/unit_tests/test_tree.py", "file_name": "test_tree.py", "file_ext": "py", "file_size_in_byte": 2017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "logging.DEBUG", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 19, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 24, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.bids.tree.tree_bids", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 44, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 48, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 50, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 52, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 53, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.bids.tree.tree_bids", "line_number": 56, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "74398032950", "text": "import boto3\nimport requests\nfrom bs4 import BeautifulSoup\nimport random\nimport time\nimport logging\n\n#print(soup.prettify())\n\ns3 = boto3.client('s3')\n\nUNPROCESSED_BUCKET = 'unprocessed-bucket-tvw'\nPROCESSED_BUCKET = 'processed-bucket-tvw'\n\n# s3.put_object(Body=soup.prettify(), \n# Bucket='unprocessed-bucket-tvw', \n# Key='test.html')\nlogging.basicConfig(filename='logs/collect.log', \n encoding='utf-8', \n level=logging.DEBUG)\n\ndef write_content(content, filename, bucket=None):\n if bucket is None:\n with open(filename, 'wb') as f:\n f.write(content)\n else:\n s3.put_object(Body=content, Bucket=bucket, Key=filename)\n\ndef get_main_page():\n URL = \"https://chicago.craigslist.org/d/software-qa-dba-etc/search/sof\"\n\n r = requests.get(URL)\n\n write_content(r.content, f'search_results/{int(time.time())}.html', bucket=UNPROCESSED_BUCKET)\n with open(f'test_data/search_results/{int(time.time())}.html', 'wb') as f:\n f.write(r.content)\n \n logging.info('Got main page')\n \n return BeautifulSoup(r.content, 'html5lib')\n\ndef random_sleep(min=0.5, max=1.5):\n x = random.random()\n sleep_time = x * (max - min) + min\n time.sleep(sleep_time)\n\ndef get_sub_pages(soup):\n for link in soup.select('.result-title, .hdrlnk'):\n url = link['href']\n id_file = url.split('/')[-1]\n\n r = requests.get(link['href'])\n\n write_content(r.content, f'job_page/{id_file}', bucket=UNPROCESSED_BUCKET)\n \n logging.info(f'Got and saved 
{url}')\n\n random_sleep()\n\nsoup = get_main_page()\nget_sub_pages(soup)\n", "repo_name": "tylerwx51/job-scraper", "sub_path": "code/collect_html.py", "file_name": "collect_html.py", "file_ext": "py", "file_size_in_byte": 1652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "boto3.client", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 20, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}, {"api_name": "random.random", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "41582334490", "text": "from PyQt5.QtWidgets import QWidget, QVBoxLayout, QToolBar, QTableWidget, QAction\nfrom . import clearaction, outputtextedit\n\n\nclass OutPutWidget(QWidget):\n\n def __init__(self, scripter, parent=None):\n super(OutPutWidget, self).__init__(parent)\n\n self.scripter = scripter\n self.setObjectName(i18n('Output'))\n self.layout = QVBoxLayout()\n\n self.toolbar = QToolBar()\n self.clearAction = clearaction.ClearAction(self.scripter, self)\n self.toolbar.addAction(self.clearAction)\n\n self.outputtextedit = outputtextedit.OutPutTextEdit(self.scripter, self)\n\n self.layout.addWidget(self.toolbar)\n self.layout.addWidget(self.outputtextedit)\n self.setLayout(self.layout)\n", "repo_name": "KDE/krita", "sub_path": "plugins/python/scripter/ui_scripter/tabwidgets/outputwidget/outputwidget.py", "file_name": "outputwidget.py", "file_ext": "py", "file_size_in_byte": 737, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5906, "dataset": "github-code", "pt": "94", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QToolBar", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "38334484979", "text": "from logging import warn, warning\nimport os\nimport re\nimport random\n\nfrom numpy import append\n\n\ndef file_name(filename):\n return filename + \".file\"\n\ndef dup_name(name):\n return name + \"!!!\"\n\ndef read_rules(fh, rules, RE, debug):\n line_num = 0\n while(line_num < len(fh)):\n print(line_num)\n line = fh[line_num]\n line_num += 1\n if(line[0] == '#'):\n continue\n if(not re.match(\"\\S\", line)):\n continue\n word = line.split()\n name = word[0]\n rule = \"\"\n\n if(name[-1] == '!'):\n rules[dup_name(name)] = ''\n \n if(name == '.include'):\n file = word[1]\n\n if(file_name(file) in rules.keys()):\n if(debug > 0):\n print (\"Skipping duplicate included file $file\\n\")\n continue\n else:\n rules[file_name(file)] = 1\n with open(file, 'r') as f:\n inc_fh = f.readlines()\n read_rules(inc_fh, rules, None, debug)\n continue\n\n if(len(word) >= 2 and word[1] == '{'): # name included\n end = 0\n while(line_num < len(fh)):\n line_num += 1\n line =fh[line_num]\n if(re.match(\"^}[\\r\\n]+$\", line)):\n 
end = 1\n break\n else:\n rule += line.rstrip()\n if(not end):\n print(name, \"EOF found before close rule\")\n else:\n line_rm_trailing = ' '.join(line.split()[1:])\n rule = line_rm_trailing.rstrip()\n\n weight = 1\n name_by_weight = re.match(\"([^\\+]*)\\+(\\d+)$\", name)\n if(name_by_weight):\n name = name_by_weight.group(1)\n weight = eval(name_by_weight.group(2))\n if(debug > 10):\n warning(f\"weighting rule by {weight}: {name} {rule}\\n\")\n \n while(weight > 0):\n weight = weight - 1\n if(name == \"\\n\"):\n print(word)\n if(name not in rules.keys()):\n rules[name] = []\n rules[name].append(rule)\n if(RE is not None):\n return compute_re(rules, RE)\n\ndef compute_re(rules, RE):\n in_str = '|'.join(sorted(rules.keys(), key= lambda x:-len(x)))\n RE = f\"^(.*?)({in_str})\"\n return RE\n \ndef expand(rules, start, RE, debug):\n # check for special rules ending in + and # \n # Rules ending in + generate a sequential integer\n # The same rule ending in # chooses a random # from among preiously\n # generated integers\n start_match = re.match(\"(.*)\\+$\", start)\n if(start_match):\n rule = start_match.group(1)\n if(rule not in rules.keys()):\n i = 0\n rules[rule] = str(i)\n else:\n i = eval(rules[rule])\n rules[rule] = str(i+1)\n return str(i)\n\n start_match = re.match(\"(.*)\\#$\", start)\n if(start_match):\n rule = start_match.group(1)\n if(rule not in rules.keys()):\n i = 0\n rules[rule]= str(i)\n else:\n i = random.randint(0, eval(rules[rule]))\n return str(i)\n\n if(start == \"}\"):\n return start\n\n repeat = 1\n count = 0\n components = []\n while(repeat):\n input = pick_rand(rules[start])\n count += 1\n if(debug >= 5):\n warn(f\"{start} -> {input}\")\n \n repeat = 0\n while(True):\n first_rule = pop_first_rule(rules, input, RE)\n if(first_rule is None):\n break\n pre, rule, input = first_rule\n ex = expand(rules, rule, RE, debug)\n if(len(pre)):\n components.append(pre)\n if(len(ex)):\n components.append(ex)\n if(len(input)):\n components.append(input)\n full_token = \"\".join(components)\n if (dup_name(start) in rules.keys()):\n ref = rules[dup_name(start)]\n dups = ref\n for d in dups:\n if(d == full_token):\n repeat = 1\n if (not repeat):\n rules[dup_name(start)] += full_token\n elif count > 50:\n repeat = 0\n return full_token\n \n\n \ndef pop_first_rule(rules, input, RE): # rule and preamble are outputs\n input_match = re.match(RE, input)\n if(input_match):\n preamble = input_match.group(1)\n rule = input_match.group(2)\n input_new = input[len(preamble) + len(rule):]\n return preamble, rule, input_new\n return None\n\ndef pick_rand(in_set):\n return in_set[random.randint(0, len(in_set) - 1)]\n\ndef generate(rules, start, RE, debug, pretty):\n s = expand(rules, start, RE, debug)\n return s", "repo_name": "Saltflow/scigen_py", "sub_path": "read_rules.py", "file_name": "read_rules.py", "file_ext": "py", "file_size_in_byte": 4110, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "re.match", "line_number": 23, "usage_type": "call"}, {"api_name": "re.match", "line_number": 51, "usage_type": "call"}, {"api_name": "re.match", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 68, "usage_type": "call"}, {"api_name": "re.match", "line_number": 90, "usage_type": "call"}, {"api_name": "re.match", "line_number": 101, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 108, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 121, 
"usage_type": "call"}, {"api_name": "re.match", "line_number": 152, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "33023897865", "text": "##############################################################\r\n\r\nfrom kivy.app import App\r\nfrom kivy.uix.widget import Widget\r\n\r\nfrom settings import *\r\n\r\nfrom kivy.core.window import Window\r\nfrom kivy.uix.popup import Popup\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.colorpicker import ColorPicker\r\n\r\nfrom kivy.graphics import (Color, Rectangle, Line, Ellipse)\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.slider import Slider\r\n\r\n##############################################################\r\n\r\n\r\n# MAIN WIDGET CLASS\r\n\r\nclass MainWidget(Widget):\r\n pass\r\n\r\n\r\n# BACKGROUND COLOR CLASS\r\n\r\nclass BackgroundColorChange(GridLayout):\r\n def __init__(self, **kwargs):\r\n super(BackgroundColorChange, self).__init__(**kwargs)\r\n\r\n self.cols = 1\r\n\r\n self.bg_picker = ColorPicker(color=background_value)\r\n self.add_widget(self.bg_picker)\r\n self.bg_picker.bind(color=self.on_color)\r\n\r\n @staticmethod\r\n def on_color(instance, value):\r\n global background_value\r\n background_value = instance.color\r\n Window.clearcolor = background_value\r\n\r\n\r\n# BRUSH COLOR CLASS\r\n\r\nclass ColorChange(GridLayout):\r\n def __init__(self, **kwargs):\r\n super(ColorChange, self).__init__(**kwargs)\r\n\r\n self.cols = 1\r\n\r\n self.clr_picker = ColorPicker(color=color_value)\r\n self.add_widget(self.clr_picker)\r\n self.clr_picker.bind(color=self.on_color)\r\n\r\n @staticmethod\r\n def on_color(instance, value):\r\n global color_value\r\n color_value = instance.color\r\n\r\n\r\n# BRUSH SIZE CLASS\r\n\r\nclass BrushSlider(GridLayout):\r\n def __init__(self, **kwargs):\r\n super(BrushSlider, self).__init__(**kwargs)\r\n\r\n self.cols = 3\r\n\r\n self.brushControl = Slider(min=1, max=100, value=rad, step=1)\r\n\r\n self.add_widget(Label(text='Size'))\r\n self.add_widget(self.brushControl)\r\n\r\n self.brushValue = Label(text=str(rad))\r\n self.add_widget(self.brushValue)\r\n\r\n self.brushControl.bind(value=self.on_value_change)\r\n\r\n def on_value_change(self, instance, amount):\r\n global rad\r\n self.brushValue.text = \"% d\" % amount\r\n rad = amount\r\n\r\n\r\n# DRAWING CLASS\r\n\r\nclass PainterWidget(Widget):\r\n def __init__(self, **kwargs):\r\n super(PainterWidget, self).__init__(**kwargs)\r\n\r\n def on_touch_down(self, touch):\r\n with self.canvas:\r\n Color(*color_value)\r\n Ellipse(pos=(touch.x - rad / 2, touch.y - rad / 2), size=(rad, rad))\r\n touch.ud['line'] = Line(points=(touch.x, touch.y), width=rad / 2)\r\n\r\n def on_touch_move(self, touch):\r\n touch.ud['line'].points += (touch.x, touch.y)\r\n\r\n\r\n# MAIN APP CLASS\r\n\r\nclass PaintApp(App):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n self.painter = PainterWidget()\r\n\r\n def build(self):\r\n\r\n parent = MainWidget()\r\n\r\n # WIDGETS\r\n\r\n parent.add_widget(self.painter)\r\n parent.add_widget(Button(text='Clear', size=btn_size, pos=(0, 0), on_press=self.clear_func))\r\n parent.add_widget(Button(text='Brush size', size=btn_size, pos=(100, 0), on_press=self.width_func))\r\n parent.add_widget(Button(text='Color', size=btn_size, pos=(200, 0), on_press=self.color))\r\n parent.add_widget(Button(text='Background', size=btn_size, pos=(300, 0),\r\n on_press=self.change_background_color))\r\n\r\n 
parent.add_widget(Button(text='Save', size=btn_size, pos=(400, 0), on_press=self.save))\r\n\r\n return parent\r\n\r\n # BACKGROUND COLOR UPDATE\r\n\r\n @staticmethod\r\n def change_background_color(instance):\r\n content = BackgroundColorChange()\r\n popup = Popup(title='Background Color', content=content, size_hint=(None, None), size=(600, 400))\r\n content.bind(on_press=popup.dismiss)\r\n popup.open()\r\n\r\n # CLEAR FUNCTION\r\n\r\n def clear_func(self, instance):\r\n self.painter.canvas.clear()\r\n\r\n # BRUSH SIZE FUNCTION\r\n\r\n @staticmethod\r\n def width_func(instance):\r\n content = BrushSlider()\r\n popup = Popup(title='Brush size', content=content, size_hint=(None, None), size=(400, 200))\r\n content.bind(on_press=popup.dismiss)\r\n popup.open()\r\n\r\n # IMAGE SAVING FUNCTION\r\n\r\n def save(self, instance):\r\n self.painter.size = (Window.size[0], Window.size[1])\r\n self.painter.export_to_png('image.png')\r\n content = Label(text='Image has been successfully saved!')\r\n popup = Popup(title='Saved', content=content, size_hint=(None, None), size=(400, 200))\r\n content.bind(on_press=popup.dismiss)\r\n popup.open()\r\n\r\n # BRUSH COLOR FUNCTION\r\n\r\n @staticmethod\r\n def color(instance):\r\n content = ColorChange()\r\n popup = Popup(title='Brush color changer', content=content, size_hint=(None, None), size=(600, 400))\r\n content.bind(on_press=popup.dismiss)\r\n popup.open()\r\n\r\n\r\nif __name__ == '__main__':\r\n PaintApp().run()\r\n", "repo_name": "scottarsenjr/simplekivypainter", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "kivy.uix.widget.Widget", "line_number": 23, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 29, "usage_type": "name"}, {"api_name": "kivy.uix.colorpicker.ColorPicker", "line_number": 35, "usage_type": "call"}, {"api_name": "kivy.core.window.Window.clearcolor", "line_number": 43, "usage_type": "attribute"}, {"api_name": "kivy.core.window.Window", "line_number": 43, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 48, "usage_type": "name"}, {"api_name": "kivy.uix.colorpicker.ColorPicker", "line_number": 54, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 66, "usage_type": "name"}, {"api_name": "kivy.uix.slider.Slider", "line_number": 72, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 74, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 77, "usage_type": "call"}, {"api_name": "kivy.uix.widget.Widget", "line_number": 90, "usage_type": "name"}, {"api_name": "kivy.graphics.Color", "line_number": 96, "usage_type": "call"}, {"api_name": "kivy.graphics.Ellipse", "line_number": 97, "usage_type": "call"}, {"api_name": "kivy.graphics.Line", "line_number": 98, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 106, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 118, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 119, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 120, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 121, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 124, "usage_type": "call"}, {"api_name": "kivy.uix.popup.Popup", "line_number": 133, 
"usage_type": "call"}, {"api_name": "kivy.uix.popup.Popup", "line_number": 147, "usage_type": "call"}, {"api_name": "kivy.core.window.Window.size", "line_number": 154, "usage_type": "attribute"}, {"api_name": "kivy.core.window.Window", "line_number": 154, "usage_type": "name"}, {"api_name": "kivy.uix.label.Label", "line_number": 156, "usage_type": "call"}, {"api_name": "kivy.uix.popup.Popup", "line_number": 157, "usage_type": "call"}, {"api_name": "kivy.uix.popup.Popup", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "13933029611", "text": "import psycopg2\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\n\r\ndef make_soup(url):\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n return soup\r\n\r\n\r\ndef extract_sex():\r\n url = \"https://wearmedicine.com\"\r\n soup = make_soup(url)\r\n ul_her = soup.find(\"a\", href=\"/k/ona/odziez\").next_sibling.next_sibling\r\n ul_him = soup.find(\"a\", href=\"/k/on/odziez\").next_sibling.next_sibling\r\n extract_category(ul_her, \"ona\")\r\n extract_category(ul_him, \"on\")\r\n\r\n\r\ndef extract_category(ul, sex):\r\n for i in ul.find_all(\"a\"):\r\n category_url = \"https://wearmedicine.com\" + i.get(\"href\")\r\n category_name = str(i.find(\"span\")).replace(\"\", \"\").replace(\"\", \"\")\r\n # category_name = str(i.string).lstrip('\\n')\r\n if category_url != \"https://wearmedicine.com\" and category_name is not None:\r\n extract_products(category_url, sex, category_name)\r\n # insert_category(category_name)\r\n\r\n\r\ndef extract_products(category_url, sex, category_name):\r\n soup = make_soup(category_url)\r\n for i in soup.find_all(\"div\", class_=\"product-item__thumb\"):\r\n product = i.find(\"a\").get(\"href\").strip(\"/p\")\r\n product_url = \"https://wearmedicine.com/p/\" + product\r\n extract_product(product_url, sex, category_name)\r\n\r\n\r\ndef extract_product(product_url, sex, category_name):\r\n soup = make_soup(product_url)\r\n title = str(soup.title.string)\r\n images = []\r\n desc = []\r\n\r\n div = soup.find_all(\"div\", class_=\"product__gallery-main\")\r\n for i in div:\r\n for j in i.find_all(\"img\"):\r\n images.append(str(j.get(\"src\")))\r\n # images.append(str(j.get(\"src\")))\r\n\r\n images = list(dict.fromkeys(images))\r\n\r\n for k in soup.find_all(\"p\", class_=\"product__price\"):\r\n price = re.findall(\"\\d+\\.\\d+\", k.text.replace(\",\", \".\"))\r\n id_price = insert_price(price[0])\r\n\r\n for l in soup.find_all(\"div\", class_=\"product__info\"):\r\n for m in l.find_all(\"div\", class_=\"row\"):\r\n for n in m.find_all(\"p\"):\r\n desc.append(n.text)\r\n for n in m.find_all(\"li\"):\r\n desc.append(n.text)\r\n\r\n description = \"\\n\".join(desc)\r\n\r\n id_category = find_category(category_name)\r\n id_product = insert_product(title, sex, description, product_url, 5, id_price, id_category)\r\n for o in images:\r\n insert_picture(o, id_product)\r\n\r\n\r\nconn = psycopg2.connect(host=\"localhost\", port=\"5432\", user=\"postgres\", database=\"postgres\", password=\"postgres\")\r\n\r\n\r\ndef insert_price(price):\r\n sql = \"\"\"INSERT INTO price(price)\r\n VALUES(%s) RETURNING id_price;\"\"\"\r\n id_price = None\r\n try:\r\n cur = conn.cursor()\r\n cur.execute(sql, (price,))\r\n id_price = cur.fetchone()[0]\r\n conn.commit()\r\n cur.close()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n\r\n return id_price\r\n\r\n\r\ndef insert_picture(img, id_product):\r\n sql = \"\"\"INSERT INTO picture(link, 
id_product)\r\n VALUES(%s, %s);\"\"\"\r\n try:\r\n cur = conn.cursor()\r\n cur.execute(sql, (img, id_product,))\r\n conn.commit()\r\n cur.close()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n\r\n\r\ndef insert_category(category):\r\n sql = \"\"\"INSERT INTO category(name)\r\n VALUES(%s) RETURNING id_category;\"\"\"\r\n id_category = None\r\n try:\r\n cur = conn.cursor()\r\n cur.execute(sql, (category,))\r\n id_category = cur.fetchone()[0]\r\n conn.commit()\r\n cur.close()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n\r\n return id_category\r\n\r\n\r\ndef find_category(category):\r\n sql = \"\"\"SELECT id_category FROM category WHERE name = %s;\"\"\"\r\n id_category = None\r\n try:\r\n cur = conn.cursor()\r\n cur.execute(sql, (category,))\r\n id_category = cur.fetchone()[0]\r\n conn.commit()\r\n cur.close()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n\r\n return id_category\r\n\r\n\r\ndef insert_product(product, sex, desc, link, id_store, id_price, id_category):\r\n sql = \"\"\"INSERT INTO product(name, sex, descr, link, id_store, id_price, id_category)\r\n VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING id_product;\"\"\"\r\n cur = conn.cursor()\r\n cur.execute(sql, (product, sex, desc, link, id_store, id_price, id_category,))\r\n id_product = cur.fetchone()[0]\r\n conn.commit()\r\n cur.close()\r\n # except (Exception, psycopg2.DatabaseError) as error:\r\n # print(error)\r\n\r\n return id_product\r\n\r\n\r\ndef main():\r\n extract_sex()\r\n # category_url = \"https://wearmedicine.com/k/on/odziez/t-shirty-i-polo\"\r\n# # extract_products(category_url)\r\n# product_url = \"https://wearmedicine.com/p/medicine-t-shirt-meski-z-nadrukiem-music-wall-szary-16784\"\r\n# extract_product(product_url, \"on\", \"T-shirty i polo\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()", "repo_name": "emchu/RecoScraper", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 55, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 73, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 86, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 100, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 114, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 129, "usage_type": "attribute"}]} +{"seq_id": "25079768407", "text": "from tgbot.commands.UserAccess import UserAccess\nfrom tgbot.commands.base import BaseCommand\nfrom utils.formatting import format_user_label\n\n\nclass AddVote(BaseCommand):\n command_names = ['addvote']\n access_level = UserAccess.VERIFIED\n\n def __init__(self, event, parsed):\n super().__init__(event, parsed, AddVote.access_level)\n\n async def process(self):\n await self.fetch_data()\n\n if not (await self.can_process()):\n return\n\n existing_votes = await self.db.getTgVoteActive(self.channel['channel_id'])\n if existing_votes is None or len(existing_votes) == 0:\n await self.reply_fail(self.get_translation('CMD_NOMINATE_VOTE_NOT_ACTIVE'))\n return\n\n existing_vote = existing_votes[0]\n\n if self.event.message.is_reply:\n 
message = await self.event.message.get_reply_message()\n target = await self.db.getUserByTgChatId(message.sender_id)\n if target is None or len(target) == 0:\n await self.reply_fail(self.get_translation('CMD_ADD_VOTE_NOT_VERIFIED'))\n return\n nominee = await self.db.getTgVoteNominee(existing_vote['tg_vote_id'], target[0]['user_id'])\n if nominee is None or len(nominee) == 0:\n await self.reply_fail(self.get_translation('CMD_ADD_VOTE_NOT_NOMINATED'))\n return\n\n nominee = nominee[0]\n else:\n try:\n num = int(self.parsed.pop(1))\n except Exception:\n num = 0\n\n if num <= 0:\n await self.reply_incorrect_input()\n return\n\n nominee_list = await self.db.getTgVoteNominees(existing_vote['tg_vote_id'])\n if nominee_list is None or len(nominee_list) < num:\n await self.reply_incorrect_input()\n return\n\n nominee = nominee_list[num - 1]\n\n if nominee is None:\n self.client.logger.error('Failed to find nominee')\n return\n\n await self.db.addTgVotePoint(existing_vote['tg_vote_id'], nominee['user_id'], self.sender['user_id'])\n\n nominee = await self.db.getTgVoteNominee(existing_vote['tg_vote_id'], nominee['user_id'])\n nominee = nominee[0]\n\n if nominee['tg_id'] is not None:\n user_entity = await self.client.get_entity(int(nominee['tg_id']))\n user_label = await format_user_label(user_entity)\n else:\n user_label = nominee['dname'] if nominee['dname'] is not None else nominee['name']\n\n await self.reply_success(self.get_translation('CMD_ADD_VOTE_SUCCESS').format(user=user_label, votes=nominee['votes']))\n", "repo_name": "kryabot/kryabot-core", "sub_path": "kryabot/tgbot/commands/vote/AddVote.py", "file_name": "AddVote.py", "file_ext": "py", "file_size_in_byte": 2684, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "97", "api": [{"api_name": "tgbot.commands.base.BaseCommand", "line_number": 6, "usage_type": "name"}, {"api_name": "tgbot.commands.UserAccess.UserAccess.VERIFIED", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tgbot.commands.UserAccess.UserAccess", "line_number": 8, "usage_type": "name"}, {"api_name": "utils.formatting.format_user_label", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "36646875815", "text": "import time\n\nfrom appium import webdriver\nfrom pathlib import Path\n# from appium.webdriver.common.appiumby import AppiumBy\n# from appium.webdriver.appium_service import AppiumService\n# from selenium.webdriver.common.keys import Keys\nfrom selenium.common import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.select import Select\n\ndesired_caps = dict(\n\n deviceName='Android',\n platformName='Android',\n appPackage='com.amazon.mShop.android.shopping',\n appActivity='com.amazon.mShop.splashscreen.StartupActivity'\n)\n# desired_caps = {}\n# desired_caps['deviceName'] = 'Android'\n# desired_caps['platformName'] = 'Android'\n# desired_caps['appPackage'] = 'com.amazon.mShop.android.shopping'\n# desired_caps['appActivity'] = 'com.amazon.mShop.splashscreen.StartupActivity'\n\n\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\ndriver.implicitly_wait(20)\n\ntry:\n a = driver.find_element(By.ID, \"com.android.permissioncontroller:id/permission_deny_button\")\n if a.is_displayed():\n driver.find_element(By.ID, \"com.android.permissioncontroller:id/permission_deny_button\").click()\n time.sleep(5)\n # wait = 
WebDriverWait(driver, 10)\n # wait.until(EC.element_to_be_clickable((By.XPATH, \"//android.widget.Button[@text='Done']\")))\n # driver.find_element(By.XPATH, \"//android.widget.Button[@text='Done']\").click()\n # wait = WebDriverWait(driver, 10)\n # wait.until(EC.element_to_be_clickable((By.ID, 'com.amazon.mShop.android.shopping:id/skip_sign_in_button')))\n # driver.find_element(By.ID, 'com.amazon.mShop.android.shopping:id/skip_sign_in_button').click()\n\nexcept NoSuchElementException:\n print(\"Permission page not found\")\n\ntry:\n b = driver.find_element(By.XPATH, \"//android.widget.Button[@text='Done']\")\n\n if b.is_displayed():\n driver.find_element(By.XPATH, \"//android.widget.Button[@text='Done']\").click()\n time.sleep(5)\n # wait = WebDriverWait(driver, 10)\n # wait.until(EC.element_to_be_clickable((By.ID, 'com.amazon.mShop.android.shopping:id/skip_sign_in_button')))\n # driver.find_element(By.ID, 'com.amazon.mShop.android.shopping:id/skip_sign_in_button').click()\n # time.sleep(1)\n\nexcept NoSuchElementException:\n print(\"Language Page Not Found\")\n\ntry:\n c = driver.find_element(By.ID, 'com.amazon.mShop.android.shopping:id/skip_sign_in_button')\n if c.is_displayed():\n driver.find_element(By.ID, 'com.amazon.mShop.android.shopping:id/skip_sign_in_button').click()\n\nexcept NoSuchElementException:\n print(\"Skip Sign not found\")\n\n\ntime.sleep(2)\n\ndriver.find_element(By.ID, 'com.amazon.mShop.android.shopping:id/chrome_search_hint_view').click()\n\nwait = WebDriverWait(driver, 10)\nwait.until(EC.element_to_be_clickable((By.ID, 'com.amazon.mShop.android.shopping:id/rs_search_src_text')))\n\ndriver.find_element(By.ID, 'com.amazon.mShop.android.shopping:id/rs_search_src_text').send_keys(\"Jeans\")\ndriver.press_keycode(66)\ntime.sleep(5)\n\nprint(\"Code Run Successfully\")\ndriver.quit()\n", "repo_name": "mdnhasan/AppiumTest", "sub_path": "testcases/modifiedif.py", "file_name": "modifiedif.py", "file_ext": "py", "file_size_in_byte": 3196, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "appium.webdriver.Remote", "line_number": 28, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 28, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 32, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.common.NoSuchElementException", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "selenium.common.NoSuchElementException", "line_number": 57, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 61, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 61, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.ID", "line_number": 63, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 63, "usage_type": "name"}, {"api_name": "selenium.common.NoSuchElementException", "line_number": 65, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 71, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 71, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 73, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 74, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 74, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 74, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 74, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 76, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 76, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "9996615501", "text": "import unittest\n\nfrom collections import Counter\n\nclass NumberNeeded(object):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def number_needed(self, a, b):\n a_dict = Counter(self.a)\n b_dict = Counter(self.b)\n needed = 0\n\n for letter in a_dict:\n if letter not in b_dict or (a_dict[letter] != b_dict[letter]):\n needed += 1\n\n for letter in b_dict:\n if letter not in a_dict or (a_dict[letter] != b_dict[letter]):\n needed += 1\n\n return needed\n\n# a = input().strip()\n# b = input().strip()\n\n# print(number_needed(a, b))\n\nunittest.main()\n\nclass TestNumberNeeded(object):\n def setUp(self, a, b):\n self.a = a\n self.b = b\n\n def test_number_needed(self):\n assertEqual()\n", "repo_name": "jeffkwiat/cracking-the-coding-interview", "sub_path": "hackerrank/data-structures/making-anagrams.py", "file_name": "making-anagrams.py", "file_ext": "py", "file_size_in_byte": 805, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "collections.Counter", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 12, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "42393985631", "text": "import sys, os\nimport django\nfrom pprint import pprint\n\nparent = os.path.abspath(\"..\")\nprint(parent)\nprint()\nsys.path.insert(1, parent)\nsys.path.append(\"../..\")\nsys.path.append(\"../../connector\")\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"connector.settings\")\ndjango.setup()\n\nfrom connector.models import Product, Order\nfrom connector.ozon_manager import OzonManager, OZON_ORDERS_STATUSES\nfrom connector.services.packages import Package, get_order_pcs\nfrom connector.config import MAX_ALLOWED_ITEMS, MAX_ALLOWED_PACKAGES\n\n\ndef filter_order_keys() -> list:\n \"\"\"Filter model fields to match order keys (flatten nested keys)\n\n Returns:\n list: filtered keys\n \"\"\"\n list_filter = [\n \"product_orders\",\n \"delivery_method_warehouse_id\",\n \"delivery_method_tpl_provider_id\",\n \"cancellation_initiator\",\n \"products\",\n \"tusmk_id\",\n \"cancellation\",\n 
\"product_orders\",\n ]\n\n order_keys = [f.name for f in Order._meta.get_fields() if f.name not in list_filter]\n return order_keys\n\n\nproducts = Product.objects.all()\nom = OzonManager()\n\ndelivery_methods = om.get_delivery_methods(\n {\"filter\": {\"warehouse_id\": 1020000071737000}}\n)\n\n\ndef test_build_packages(order):\n om = OzonManager()\n products = []\n products = [product[\"offer_id\"] for product in order[\"products\"]]\n product_quantities = {\n product[\"offer_id\"]: product[\"quantity\"] for product in order[\"products\"]\n }\n dimensions = om.get_dimensions(products)\n product_dimensions = {\n o[\"offer_id\"]: [o[\"width\"], o[\"height\"], o[\"depth\"], o[\"weight\"]]\n for o in dimensions[\"result\"]\n }\n product_data = []\n for p, q in product_quantities.items():\n for i in range(q):\n product_data.append([p, *product_dimensions[p]])\n package = Package()\n return package.pack(product_data, False)\n\n\nfor status in OZON_ORDERS_STATUSES:\n orders = om.get_orders_by_status(status)\n for o in orders[\"result\"][\"postings\"]:\n packages = test_build_packages(o)\n pieces = get_order_pcs(o)\n print(\"Заказ\", o[\"order_id\"], f\"Упаковок : {len(packages)}, Штук : {pieces}\")\n pprint(packages)\n\n if len(packages) > MAX_ALLOWED_PACKAGES:\n print(f\"{o['order_id']} Не помещается в одну упаковку, такой заказ обрабатываем вручную\")\n elif pieces > MAX_ALLOWED_ITEMS:\n print(f\"Количество позиций в {o['order_id']} - {pieces}, такой заказ обрабатываем вручную\")\n else:\n print(f'С этим заказом все в порядке, он будет обработа�� автоматически')\n", "repo_name": "elbrus-56/ozon_project", "sub_path": "ozondrf/connector/tests/test_packaging.py", "file_name": "test_packaging.py", "file_ext": "py", "file_size_in_byte": 2757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 13, "usage_type": "call"}, {"api_name": "connector.models.Order._meta.get_fields", "line_number": 38, "usage_type": "call"}, {"api_name": "connector.models.Order._meta", "line_number": 38, "usage_type": "attribute"}, {"api_name": "connector.models.Order", "line_number": 38, "usage_type": "name"}, {"api_name": "connector.models.Product.objects.all", "line_number": 42, "usage_type": "call"}, {"api_name": "connector.models.Product.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "connector.models.Product", "line_number": 42, "usage_type": "name"}, {"api_name": "connector.ozon_manager.OzonManager", "line_number": 43, "usage_type": "call"}, {"api_name": "connector.ozon_manager.OzonManager", "line_number": 51, "usage_type": "call"}, {"api_name": "connector.services.packages.Package", "line_number": 66, "usage_type": "call"}, {"api_name": 
"connector.ozon_manager.OZON_ORDERS_STATUSES", "line_number": 70, "usage_type": "name"}, {"api_name": "connector.services.packages.get_order_pcs", "line_number": 74, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 76, "usage_type": "call"}, {"api_name": "connector.config.MAX_ALLOWED_PACKAGES", "line_number": 78, "usage_type": "name"}, {"api_name": "connector.config.MAX_ALLOWED_ITEMS", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "16034984931", "text": "#8:24\n#카드 짝 맞추기 게임, 4*4 크기, 8가지 캐릭터가 2장씩 무작위 배치\n#같은 그림이면 사라지며 아니면 원래 상태로 뒷면이 보이도록 뒤집힘\n#1. 커서를 이용해서 카드 선택\n#2. 방향키 조작(1칸이동) + ctrl 방향키 조작(해당 방향 카드 or 마지막 칸)\n#3. enter를 통해 카드 뒤집기, 2번째 카드가 뒤집힐때까지 앞면 유지, 앞면이 2장이 된 경우 그림이 같으면 사라짐, 아니면 다시 뒤집힘\n#카드 앞면의 그림을 모두 알고있음, 남은 카드를 모두 제거하기 위한 횟수의 최솟값\n\n#순서 배열을 정한 후 각 경우 최선의 경로로 삭제\n\nimport itertools\nimport copy\nfrom collections import deque\n\n#이동 방향 초기화\ndx = [-1,1,0,0]\ndy = [0,0,-1,1]\n\n#x1, y1 -> x2, y2로 이동\ndef BFS(x1,y1,x2,y2,test_graph):\n queue = deque()\n queue.append([x1,y1])\n true = [[-1]*4 for i in range(4)]\n true[x1][y1] = 0\n while queue:\n x, y = queue.popleft()\n \n #기본 이동\n for i in range(4):\n nx, ny = x+dx[i], y+dy[i]\n if 0<=nx<4 and 0<=ny<4:\n if true[nx][ny] == -1: #방문 가능하지만 방문한 적이 없는 경우\n true[nx][ny] = true[x][y]+1\n queue.append([nx,ny])\n \n #커서 이동\n for i in range(4):\n k = 1\n nx, ny = x+dx[i], y+dy[i]\n if 0<=nx<4 and 0<=ny<4:\n while test_graph[nx][ny] == 0:\n k += 1\n sx, sy = nx, ny\n nx, ny = x+dx[i]*k, y+dy[i]*k\n if 0<=nx<4 and 0<=ny<4:\n continue\n else:\n nx, ny = sx, sy\n break\n if true[nx][ny] == -1:\n true[nx][ny] = true[x][y]+1\n queue.append([nx,ny])\n \n if true[x2][y2] != -1:\n break\n return true[x2][y2]\n\ndef RC(r,c,index,test_graph, arr, graph, Sum, answer):\n \n if index == len(arr):\n answer[0] = min(answer[0], Sum)\n return\n \n #첫번째 경우\n sx, sy = graph[arr[index]][0][0], graph[arr[index]][0][1]\n lx, ly = graph[arr[index]][1][0], graph[arr[index]][1][1]\n \n k1 = BFS(r,c,sx,sy,test_graph) #첫번째 이모티콘까지 커서 수\n k2 = BFS(sx,sy,lx,ly,test_graph) #두번째 이모티콘까지 커서 수\n \n v1 = test_graph[sx][sy]\n v2 = test_graph[lx][ly]\n test_graph[sx][sy] = 0\n test_graph[lx][ly] = 0\n \n RC(lx,ly,index+1,test_graph,arr,graph, Sum+k1+k2+2, answer)\n \n test_graph[sx][sy] = v1\n test_graph[lx][ly] = v2\n \n \n #두번째 경우\n sx, sy = graph[arr[index]][1][0], graph[arr[index]][1][1]\n lx, ly = graph[arr[index]][0][0], graph[arr[index]][0][1]\n \n k1 = BFS(r,c,sx,sy,test_graph) #첫번째 이모티콘까지 커서 수\n k2 = BFS(sx,sy,lx,ly,test_graph) #두번째 이모티콘까지 커서 수\n \n v1 = test_graph[sx][sy]\n v2 = test_graph[lx][ly]\n test_graph[sx][sy] = 0\n test_graph[lx][ly] = 0\n \n RC(lx,ly,index+1,test_graph,arr,graph, Sum+k1+k2+2, answer)\n\n test_graph[sx][sy] = v1\n test_graph[lx][ly] = v2\n \n return\n \ndef solution(board, r, c):\n \n #캐릭터 수 및 위치 초기화\n character = set()\n graph = dict()\n for i in range(4):\n for j in range(4):\n if board[i][j] != 0:\n character.add(board[i][j])\n if board[i][j] not in graph:\n graph[board[i][j]] = []\n graph[board[i][j]].append([i,j])\n character = list(character)\n \n #삭제할 순서 배열\n arrs = list(itertools.permutations(character))\n \n answer = [int(1e9)]\n \n #순서 배열돌면서 최소 값 확인\n for arr in arrs:\n \n #각 상황을 위한 그래프 초기화\n test_graph = copy.deepcopy(board)\n \n RC(r,c,0,test_graph, arr, graph, 0, answer)\n \n return answer[0]", "repo_name": "Gseungmin/Algorithm", "sub_path": "프로그래머스/lv3/72415. 
카드 짝 맞추기/카드 짝 맞추기.py", "file_name": "카드 짝 맞추기.py", "file_ext": "py", "file_size_in_byte": 4092, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "collections.deque", "line_number": 21, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 116, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "4767779965", "text": "import pytest\nimport requests\n\n\nclass TestCookie:\n def test_cookie(self):\n response = requests.get(\"https://playground.learnqa.ru/api/homework_cookie\")\n cookie = dict(response.cookies)\n print(cookie)\n assert cookie['HomeWork'] == \"hw_value\", \"The cookie parameter is not correct\"\n\n# pytest -s Lesson_3/test_example_11_cookie.py -k \"test_cookie\"\n", "repo_name": "Anton-Py/LearnQA_Python_API", "sub_path": "Lesson_3/test_example_11_cookie.py", "file_name": "test_example_11_cookie.py", "file_ext": "py", "file_size_in_byte": 377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "27905903869", "text": "from flask import Blueprint, request, jsonify\nimport bfly.majors.models\n\nmajors = Blueprint('majors', __name__)\n\n\n@majors.route(\"/jobs_by_majors\")\ndef list_jobs_for_major():\n token = request.args.get('page_token', None)\n majors = request.args.getlist('majors')\n\n if token:\n token = token.encode('utf-8')\n\n jobs, next_page_token = bfly.majors.models.list_jobs_by_majors(majors=majors, cursor=token)\n return jsonify(dict(jobs=jobs, page_token=next_page_token))\n\n\n@majors.route(\"/majors\")\ndef list_majors():\n token = request.args.get('page_token', None)\n if token:\n token = token.encode('utf-8')\n\n majors, next_page_token = bfly.majors.models.list_majors(cursor=token)\n\n return jsonify(dict(majors=majors, page_token=next_page_token))\n\n\n@majors.route('/majors/')\ndef view_major(title):\n major = bfly.majors.models.read_major(title)\n return jsonify(major)\n", "repo_name": "cnetedu/butterflyone", "sub_path": "webapp/bfly/majors/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 906, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "flask.Blueprint", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.request.args.getlist", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 10, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 10, "usage_type": "name"}, {"api_name": "bfly.majors.models.majors.models.list_jobs_by_majors", "line_number": 15, "usage_type": "call"}, {"api_name": "bfly.majors.models.majors", "line_number": 15, "usage_type": "attribute"}, {"api_name": "bfly.majors.models", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": 
"bfly.majors.models.majors.models.list_majors", "line_number": 25, "usage_type": "call"}, {"api_name": "bfly.majors.models.majors", "line_number": 25, "usage_type": "attribute"}, {"api_name": "bfly.majors.models", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 27, "usage_type": "call"}, {"api_name": "bfly.majors.models.majors.models.read_major", "line_number": 32, "usage_type": "call"}, {"api_name": "bfly.majors.models.majors", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bfly.majors.models", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "38065460847", "text": "import bpy\nfrom ..enums import *\n\n\nclass M2_PT_event_panel(bpy.types.Panel):\n bl_space_type = \"PROPERTIES\"\n bl_region_type = \"WINDOW\"\n bl_context = \"object\"\n bl_label = \"M2 Event\"\n\n def draw_header(self, context):\n self.layout.prop(context.object.wow_m2_event, \"enabled\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n layout.enabled = context.object.wow_m2_event.enabled\n\n col = layout.column()\n col.prop(context.object.wow_m2_event, 'token')\n col.prop(context.object.wow_m2_event, 'enabled')\n\n event_name = M2EventTokens.get_event_name(context.object.wow_m2_event.token)\n if event_name in ('PlayEmoteSound', 'DoodadSoundUnknown', 'DoodadSoundOneShot', 'GOPlaySoundKitCustom'):\n col.label(text='SoundEntryID')\n col.prop(context.object.wow_m2_event, 'data')\n elif event_name == 'GOAddShake':\n col.label(text='SpellEffectCameraShakesID')\n col.prop(context.object.wow_m2_event, 'data')\n\n @classmethod\n def poll(cls, context):\n return (context.scene is not None\n and context.scene.wow_scene.type == 'M2'\n and context.object is not None\n and context.object.type == 'EMPTY'\n and not (context.object.wow_m2_attachment.enabled\n or context.object.wow_m2_uv_transform.enabled\n or context.object.wow_m2_camera.enabled)\n )\n\n\nclass WowM2EventPropertyGroup(bpy.types.PropertyGroup):\n\n enabled: bpy.props.BoolProperty(\n name='Enabled',\n description='Enabled this object to be a WoW M2 event',\n default=False\n )\n\n token: bpy.props.EnumProperty(\n name='Token',\n description='This token defines the purpose of the event',\n items=get_event_names\n )\n\n data: bpy.props.IntProperty(\n name='Data',\n description='Data passed when this event is fired',\n min=0\n )\n\n fire: bpy.props.BoolProperty(\n name='Enabled',\n description='Enable this event in this specific animation keyframe',\n default=False\n )\n\n\ndef register():\n bpy.types.Object.wow_m2_event = bpy.props.PointerProperty(type=WowM2EventPropertyGroup)\n\n\ndef unregister():\n del bpy.types.Object.wow_m2_event\n", "repo_name": "DeKaDeNcE/blender-wow-studio", "sub_path": "io_scene_wmo/m2/ui/panels/event.py", "file_name": "event.py", "file_ext": "py", "file_size_in_byte": 2321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "94", "api": [{"api_name": "bpy.types", "line_number": 5, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 42, "usage_type": "attribute"}, {"api_name": "bpy.props.BoolProperty", "line_number": 44, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 44, "usage_type": "attribute"}, {"api_name": "bpy.props.EnumProperty", "line_number": 50, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 50, "usage_type": "attribute"}, {"api_name": "bpy.props.IntProperty", "line_number": 56, "usage_type": 
"call"}, {"api_name": "bpy.props", "line_number": 56, "usage_type": "attribute"}, {"api_name": "bpy.props.BoolProperty", "line_number": 62, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 62, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 70, "usage_type": "attribute"}, {"api_name": "bpy.props.PointerProperty", "line_number": 70, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 70, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "42879899158", "text": "# from collections import deque\n# import sys\n# import copy\n\n# N=int(sys.stdin.readline())\n# indegree=[0]*(N+1)\n# graph=[[] for i in range(N+1)]\n# time=[0]*(N+1)\n\n# for i in range(1, N+1): # i는 과목번호.\n# data=list(map(int, sys.stdin.readline().split())) # data는 시간, 들어야 할 과목 번호, 과목 번호, ... , -1로 이루어진다.\n# time[i]=data[0] # data의 제일 앞에 오는 시간은 따로 time리스트에 저장하고,\n# for x in data[1:-1]: # data[1:-1]은 선수과목 번호가 나열된 리스트. 즉, x는 선수과목 번호를 의미.\n# indegree[i]+=1 # i가 1이면 indegree[1]+=1인데, 이 의미는 1번 과목을 들으려면 선수과목을 1개 더 들어야 한다는 의미. 즉, indegree는 선수과목의 개수.\n# graph[x].append(i) # graph는 어떤 과목(x)을 들은 후에 몇 번의(순서적 의미) 과목을 들어야 하는지 의미. graph[2].append(1)이라면 2번 과목을 듣고 1번 과목을 들어야 함을 의미.\n\n# def topology_sort():\n# result=copy.deepcopy(time) # deepcopy는 내부의 객체들까지 모두 copy. a를 deepcopy한 b가 있을 때, a 내부 객체에 변화를 주어도 b는 본래 카피된 a의 형태.\n# q=deque()\n\n# for i in range(1, N+1): # i는 과목번호.\n# if indegree[i]==0: # 들어야 할 선수과목이 없다면\n# q.append(i) # q에 과목번호를 넣어준다.\n\n# while q:\n# now=q.popleft() # 선수과목이 없는 과목번호를 꺼낸다.\n# for i in graph[now]: # i는 선수과목이 없는 과목을 들은 후, 다음으로 들어야 하는 과목을 의미. graph는 index과목 이후에 들어야 할 과목을 나타내므로.\n# result[i]=max(result[i], result[now]+time[i]) # i과목을 듣는 시간은 결과적으로 1. result[i]와 2. 선수과목이 없는 과목을 듣는 시간과 i과목을 듣는 시간을 합친 값 중 최대값.\n# indegree[i]-=1 # 위의 과정을 거치며 선수과목(now)을 하나 들은 셈이 되므로 -1을 해준다.\n# if indegree[i]==0: # 들어야 할 선수과목이 없다면\n# q.append(i) # q에 과목번호를 넣어준다.\n\n# for i in range(1, N+1):\n# print(result[i])\n\n# topology_sort()\n\n# 2020년 11월 7일 풀이\n\nfrom collections import deque\nimport sys\nimport copy\n\nN=int(sys.stdin.readline())\nindegree=[0]*(N+1)\ngraph=[[] for i in range(N+1)]\ntime=[0]*(N+1)\n\nfor i in range(1, N+1):\n data=list(map(int, sys.stdin.readline().split()))\n time[i]=data[0]\n for x in data[1:-1]:\n indegree[i]+=1\n graph[x].append(i)\n\ndef topology_sort():\n result=copy.deepcopy(time)\n q=deque()\n\n for i in range(1, N+1):\n if indegree[i]==0:\n q.append(i)\n\n while q:\n now=q.popleft()\n for i in graph[now]:\n result[i]=max(result[i], result[now]+time[i])\n indegree[i]-=1\n if indegree[i]==0:\n q.append(i)\n\n for i in range(1, N+1):\n print(result[i])\n\ntopology_sort()\n", "repo_name": "arajo-hub/Algorithm-practice", "sub_path": "Book/Curriculum.py", "file_name": "Curriculum.py", "file_ext": "py", "file_size_in_byte": 3086, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "sys.stdin.readline", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 50, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 57, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "19661325430", "text": "# coding:utf-8\n\"\"\"\n计算帧之间的差异 考虑背景帧与其他帧之间的差异\n\"\"\"\nimport cv2\nimport numpy as 
np\n\nprint(\"hello world\")\n# Open the video file and name it video\nvideo = cv2.VideoCapture('001-bg-01-090.avi')\n# Define the kernel\nkernel = np.ones((5, 5), np.uint8)\nbackground = None\n\nwhile True:\n    # Read a frame from the video\n    ret, frame = video.read()\n    # ret is a boolean; True means a frame was read\n    # Use the first frame as the background\n    if background is None:\n        background = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        background = cv2.GaussianBlur(background, (5, 5), 0)\n        continue\n    # Read the current frame\n    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    # Gaussian smoothing to blur the frame and reduce noise caused by lighting, vibration, etc.\n    gray_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)\n\n    # Compute the difference between the background and the current frame\n    diff = cv2.absdiff(background, gray_frame)\n    # Binarize the difference: threshold 25, max value 255; pixels above the threshold become 255, the rest become 0\n    diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]\n    # Define an elliptical structuring element with kernel size (9,4)\n    # es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,9))\n    # Dilation\n    # diff = cv2.dilate(diff, es, iterations=2)\n    # Find contours\n    cnts, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n    \"\"\"\n    cv.findContours()\n        Parameters:\n            1 The image in which to find contours; it must be a binary image, not a grayscale one\n            2 The contour retrieval mode, one of four:\n                cv2.RETR_EXTERNAL detects only the outer contours\n                cv2.RETR_LIST detects contours without building any hierarchy\n                cv2.RETR_CCOMP builds a two-level hierarchy: the top level holds the outer boundaries,\n                the inner level holds the boundaries of holes.\n                If another connected object sits inside a hole, its boundary is also placed on the top level\n                cv2.RETR_TREE builds a full tree-structured contour hierarchy\n            3 The contour approximation method\n                cv2.CHAIN_APPROX_NONE stores all contour points,\n                with adjacent points differing by at most 1 pixel,\n                i.e. max(abs(x1-x2),abs(y2-y1))==1\n                cv2.CHAIN_APPROX_SIMPLE compresses horizontal, vertical and diagonal runs,\n                keeping only their end points; a rectangular contour needs only 4 points\n        Returns:\n            contours: a list in which every item is one contour; only the points needed to describe the contour are stored\n            hierarchy: an ndarray with as many elements as there are contours,\n            each contour contours[i] has 4 hierarchy entries hierarchy[i][0] ~hierarchy[i][3],\n            the indices of the next, previous, parent and nested contour; the value is negative if there is no such contour\n    \"\"\"\n\n    for c in cnts:\n        # Ignore contours that are too small; they may just be speckle noise\n        if cv2.contourArea(c) < 2000:\n            continue\n        # Draw the contour\n        (x, y, w, h) = cv2.boundingRect(c)\n        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n    cv2.imshow(\"contours\", frame)\n    cv2.imshow(\"diff\", diff)\n    if cv2.waitKey(5) & 0xff == ord(\"q\"):\n        break\n\ncv2.destroyAllWindows()\nvideo.release()", "repo_name": "ZMC-20001105/gait_recognition", "sub_path": "my_first_code.py", "file_name": "my_first_code.py", "file_ext": "py", "file_size_in_byte": 3436, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "cv2.VideoCapture", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.absdiff", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 68, "usage_type": "call"}, {"api_name":
"cv2.rectangle", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "11142504436", "text": "import re\nimport csv\nimport io\nfrom jsonschema import validate\nfrom jsonschema.exceptions import ValidationError as JsonSchemaValidationError\n\n\nclass MCASValidator(object):\n \"\"\"MCAS Validator.\"\"\"\n\n def __init__(self, logger):\n \"\"\"Init method.\"\"\"\n super().__init__()\n self.logger = logger\n\n def validate_portal_url(self, portal_url):\n \"\"\"Validate Portal url. If not present, issues appropriate logs and exists docker.\n\n :param: portal_url: the Portal url to be validated\n :returns: Whether the provided value is valid or not. True in case of valid value, False otherwise\n \"\"\"\n if not portal_url:\n return False\n\n if portal_url.strip().startswith(\n \"http://\"\n ) or portal_url.strip().startswith(\"https://\"):\n return False\n\n return True\n\n def validate_data_source(self, data_source):\n \"\"\"Validate Data Source. If not present, issues appropriate logs and exists docker.\n\n :param: data_source: the Data Source to be validated\n :returns: Whether the provided value is valid or not. True in case of valid value, False otherwise\n \"\"\"\n if not data_source:\n return False\n\n data_source_regex = re.compile(r\"^[A-Za-z0-9_-]*$\")\n return True if data_source_regex.match(data_source) else False\n\n def validate_taxonomy(self, instance):\n \"\"\"Validate the schema of given taxonomy JSON.\n\n :param instance: The JSON object to be validated\n :return: True if the schema is valid, False otherwise\n \"\"\"\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"header\": {\"type\": \"object\", \"minProperties\": 0},\n \"extension\": {\"type\": \"object\", \"minProperties\": 0},\n },\n \"required\": [\"header\", \"extension\"],\n }\n\n validate(instance=instance, schema=schema)\n\n def validate_json(self, instance):\t\n \"\"\"Validate the schema of given taxonomy JSON.\t\n Args:\t\n instance: The JSON object to be validated\t\n Returns:\t\n True if the schema is valid, False otherwise\t\n \"\"\"\t\n schema = {\t\n \"type\": \"object\",\t\n \"patternProperties\": {\t\n \".*\": {\t\n \"type\": \"object\",\t\n \"patternProperties\": {\t\n \".*\": {\t\n \"type\": \"array\",\t\n }\t\n }\t\n },\t\n },\t\n }\t\n validate(instance=instance, schema=schema)\n\n def validate_mapping_schema(self, mappings):\n \"\"\"Read the given mapping file and validates its schema.\n\n :param mapping_file: The mapping file from which the schema is to be validated\n :return: True in case of valid schema, False otherwise\n \"\"\"\n # Schema of mapping file\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"delimiter\": {\"type\": \"string\", \"minLength\": 1, \"enum\": [\"|\"]},\n \"validator\": {\"type\": \"string\", \"minLength\": 1},\n \"cef_version\": {\"type\": \"string\", \"minLength\": 1},\n \"taxonomy\": {\n \"type\": \"object\",\n \"properties\": {\n \"alerts\": {\"type\": \"object\"},\n \"events\": {\"type\": \"object\"},\n },\n },\n },\n \"required\": [\"delimiter\", \"taxonomy\", \"cef_version\"],\n }\n\n # If no exception is raised by validate(), the instance is valid.\n try:\n validate(instance=mappings, schema=schema)\n except JsonSchemaValidationError as err:\n 
self.logger.error(\n \"Error occurred while validating JSON schema: {}\".format(err)\n )\n return False\n\n # Validate the schema of all taxonomy\n for data_type, dtype_taxonomy in mappings[\"taxonomy\"].items():\n if data_type == \"json\":\t\n self.validate_json(dtype_taxonomy)\t\n else:\n for subtype, subtype_taxonomy in dtype_taxonomy.items():\n try:\n self.validate_taxonomy(subtype_taxonomy)\n except JsonSchemaValidationError as err:\n self.logger.error(\n 'Error occurred while validating JSON schema for type \"{}\" and subtype \"{}\": '\n \"{}\".format(data_type, subtype, err)\n )\n return False\n return True\n\n def validate_mcas_map(self, mappings):\n \"\"\"Validate field mappings file. If not present, issues appropriate logs and exists docker.\n\n :param: mapping_file: the field mappings file to be validated\n :returns: Whether the provided value is valid or not. True in case of valid value, False otherwise\n \"\"\"\n if not mappings:\n self.logger.error(\"Could not find mcas mappings.\")\n return False\n try:\n if self.validate_mapping_schema(mappings):\n return True\n except Exception as err:\n self.logger.error(\n \"An error occurred while validating the fields from the mapping file: {}\".format(\n str(err)\n )\n )\n\n return False\n\n def validate_valid_extensions(self, valid_extensions):\n \"\"\"Validate CSV extensions.\n\n Args:\n valid_extensions: the CSV string to be validated\n\n Returns:\n Whether the provided value is valid or not. True in case of valid value, False otherwise\n \"\"\"\n try:\n csviter = csv.DictReader(\n io.StringIO(valid_extensions), strict=True\n )\n headers = next(csviter)\n\n if all(\n header in headers\n for header in [\"CEF Key Name\", \"Length\", \"Data Type\"]\n ):\n return True\n except Exception:\n return False\n\n return False\n", "repo_name": "netskopeoss/ta_cloud_exchange_plugins", "sub_path": "mcas_cls/utils/mcas_validator.py", "file_name": "mcas_validator.py", "file_ext": "py", "file_size_in_byte": 6135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "re.compile", "line_number": 41, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 59, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 81, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 109, "usage_type": "call"}, {"api_name": "jsonschema.exceptions.ValidationError", "line_number": 110, "usage_type": "name"}, {"api_name": "jsonschema.exceptions.ValidationError", "line_number": 124, "usage_type": "name"}, {"api_name": "csv.DictReader", "line_number": 163, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "3320955479", "text": "import re\nimport nltk\nfrom flask import Flask, request\nfrom nltk.tokenize import sent_tokenize\nfrom transformers import MarianMTModel, MarianTokenizer\n\n\nnltk.download('punkt')\nmodchoice = \"Helsinki-NLP/opus-mt-en-zh\"\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\n\ntokenizer = MarianTokenizer.from_pretrained(modchoice)\nmodel = MarianMTModel.from_pretrained(modchoice)\n\n\ndef clean_text(text):\n text = text.encode(\"ascii\", errors=\"ignore\").decode(\n \"ascii\"\n ) # remove non-ascii, Chinese characters\n text = re.sub(r\"\\n\", \" \", text)\n text = re.sub(r\"\\n\\n\", \" \", text)\n text = re.sub(r\"\\t\", \" \", text)\n text = re.sub(r\"http\\S+\", \"\", text)\n text = re.sub(r\"ADVERTISEMENT\", \" \", text)\n text = re.sub(\n 
r\"Download our app or subscribe to our Telegram channel for the latest updates on the coronavirus outbreak: https://cna.asia/telegram\",\n \" \",\n text,\n )\n text = re.sub(\n r\"Download our app or subscribe to our Telegram channel for the latest updates on the COVID-19 outbreak: https://cna.asia/telegram\",\n \" \",\n text,\n )\n text = text.strip(\" \")\n text = re.sub(\n \" +\", \" \", text\n ).strip() # get rid of multiple spaces and replace with a single\n return text\n\ndef translate(text):\n input_text = clean_text(text)\n if input_text is None or text == \"\":\n return (\"Error\",)\n translated = model.generate(\n **tokenizer.prepare_seq2seq_batch(\n sent_tokenize(input_text),\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\"\n )\n )\n tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]\n return \" \".join(tgt_text)\n\n\n@app.route(\"/translate\", methods=['POST'])\ndef app_translate():\n req_data = request.get_json(force=True)\n text = req_data.get('text', '')\n\n return {\n 'text': translate(text)\n }\n\nif __name__ == \"__main__\":\n app.run(threaded=True)\n\n# translate(\"Hello world. This is last time \")\n\n# translate(\"\"\"The project is called QueryStorm. It uses Roslyn to offer C# (and VB.NET) support in Excel, as an alternative to VBA. I've posted about it before, but a lot has changed since then so figured I'd share an update.\n# The current version includes a host of new features, namely a C# debugger, support for NuGet packages, and the ability to publish Excel extensions to an \"AppStore\" (which is essentially a NuGet repository). The AppStore can be used by anyone with the (free) runtime component.\n# Another great addition is the community license, which is a free license for individuals and small companies to use. 
It unlocks most features, but it isn't intended for companies with more than 5 employees or over $1M in annual revenue.\n# I would love to hear your feedback and am happy to answer any technical questions about how QueryStorm is implemented.\"\"\")", "repo_name": "corvofeng/Translater", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "nltk.download", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "transformers.MarianTokenizer.from_pretrained", "line_number": 13, "usage_type": "call"}, {"api_name": "transformers.MarianTokenizer", "line_number": 13, "usage_type": "name"}, {"api_name": "transformers.MarianMTModel.from_pretrained", "line_number": 14, "usage_type": "call"}, {"api_name": "transformers.MarianMTModel", "line_number": 14, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 22, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 24, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 25, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 26, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 37, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "18915326689", "text": "import logging\nfrom logutils.testing import TestHandler, Matcher\nfrom logutils.queue import QueueHandler, QueueListener, queue\nimport unittest\n\nclass QueueTest(unittest.TestCase):\n def setUp(self):\n self.handler = h = TestHandler(Matcher())\n self.logger = l = logging.getLogger()\n self.queue = q = queue.Queue(-1)\n self.qh = qh = QueueHandler(q)\n self.ql = ql = QueueListener(q, h)\n ql.start()\n l.addHandler(qh)\n\n def tearDown(self):\n self.logger.removeHandler(self.qh)\n self.qh.close()\n self.handler.close()\n\n def test_simple(self):\n \"Simple test of queue handling and listening.\"\n # Just as a demo, let's log some messages.\n # Only one should show up in the log.\n self.logger.debug(\"This won't show up.\")\n self.logger.info(\"Neither will this.\")\n self.logger.warning(\"But this will.\")\n self.ql.stop() #ensure all records have come through.\n h = self.handler\n #import pdb; pdb.set_trace()\n self.assertTrue(h.matches(levelno=logging.WARNING))\n self.assertFalse(h.matches(levelno=logging.DEBUG))\n self.assertFalse(h.matches(levelno=logging.INFO))\n\n def test_partial(self):\n \"Test of partial matching through queues.\"\n # Just as a demo, let's log some messages.\n # Only one should show up in the log.\n self.logger.debug(\"This won't show up.\")\n self.logger.info(\"Neither will this.\")\n self.logger.warning(\"But this will.\")\n self.ql.stop() #ensure all records have come through.\n h = self.handler\n self.assertTrue(h.matches(msg=\"ut th\")) # from \"But this will\"\n self.assertTrue(h.matches(message=\"ut th\")) # from \"But this will\"\n self.assertFalse(h.matches(message=\"either\"))\n self.assertFalse(h.matches(message=\"won't\"))\n\n def test_multiple(self):\n \"Test of matching 
multiple values through queues.\"\n # Just as a demo, let's log some messages.\n # Only one should show up in the log.\n self.logger.debug(\"This won't show up.\")\n self.logger.info(\"Neither will this.\")\n self.logger.warning(\"But this will.\")\n self.logger.error(\"And so will this.\")\n self.ql.stop() #ensure all records have come through.\n h = self.handler\n self.assertTrue(h.matches(levelno=logging.WARNING,\n message='ut thi'))\n self.assertTrue(h.matches(levelno=logging.ERROR,\n message='nd so wi'))\n self.assertFalse(h.matches(levelno=logging.INFO))\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "vsajip/logutils", "sub_path": "tests/test_queue.py", "file_name": "test_queue.py", "file_ext": "py", "file_size_in_byte": 2659, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "94", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logutils.testing.TestHandler", "line_number": 8, "usage_type": "call"}, {"api_name": "logutils.testing.Matcher", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "logutils.queue.queue.Queue", "line_number": 10, "usage_type": "call"}, {"api_name": "logutils.queue.queue", "line_number": 10, "usage_type": "name"}, {"api_name": "logutils.queue.QueueHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "logutils.queue.QueueListener", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 31, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 32, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 59, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 63, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "26112709468", "text": "from django.shortcuts import render, redirect\nfrom .models import *\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom random import randrange\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom eshop.settings import RAZOR_KEY_ID,RAZOR_KEY_SECRET\nimport razorpay\n\n\n\ndef home(Request):\n data = Product.objects.all().order_by('id').reverse()[:8]\n\n return render(Request, \"index.html\", {'data': data})\n\n\ndef shop(Request, mc, sc, br):\n if mc == \"All\" and sc == \"All\" and br == \"All\":\n data = Product.objects.all().order_by('id').reverse()\n elif mc != \"All\" and sc == \"All\" and br == \"All\":\n data = Product.objects.filter(\n maincategory=Maincategory.objects.get(name=mc))\n elif mc == \"All\" and sc != \"All\" and br == \"All\":\n data = Product.objects.filter(\n subcategory=Subcategory.objects.get(name=sc))\n elif mc == \"All\" and sc == 'All' and br != 'All':\n data = Product.objects.filter(brand=Brand.objects.get(name=br))\n elif mc != \"All\" and sc != 'All' and br == 'All':\n data = Product.objects.filter(maincategory=Maincategory.objects.get(\n name=mc), subcategory=Subcategory.objects.get(name=sc))\n elif mc != \"All\" and sc == 'All' and br != 
'All':\n        data = Product.objects.filter(maincategory=Maincategory.objects.get(\n            name=mc), brand=Brand.objects.get(name=br))\n    elif mc == 'All' and sc != 'All' and br != 'All':\n        data = Product.objects.filter(subcategory=Subcategory.objects.get(\n            name=sc), brand=Brand.objects.get(name=br))\n    else:\n        data = Product.objects.filter(maincategory=Maincategory.objects.get(\n            name=mc), subcategory=Subcategory.objects.get(name=sc), brand=Brand.objects.get(name=br))\n    maincategory = Maincategory.objects.all()\n    subcategory = Subcategory.objects.all()\n    brand = Brand.objects.all()\n    return render(Request, \"shop.html\", {'data': data, 'maincategory': maincategory, 'subcategory': subcategory, 'brand': brand, 'mc': mc, 'sc': sc, 'br': br})\n\n
\ndef singleProduct(Request, id):\n    data = Product.objects.get(id=id)\n    return render(Request, \"single-product.html\", {'data': data})\n\n
\ndef loginPage(Request):\n    if Request.method == 'POST':\n        username = Request.POST.get('username')\n        password = Request.POST.get('password')\n        user = authenticate(username=username, password=password)\n        if user:\n            login(Request, user)\n            return redirect('/profile')\n        else:\n            messages.error(Request, \"Invalid Username or Password\")\n    return render(Request, \"login.html\")\n\n
\ndef logoutPage(Request):\n    logout(Request)\n    return redirect('/login/')\n\n
\ndef signupPage(Request):\n    if Request.method == 'POST':\n        p = Request.POST.get('password')\n        cp = Request.POST.get('cpassword')\n        if p == cp:\n            b = Buyer()\n            b.name = Request.POST.get('name')\n            b.username = Request.POST.get('username')\n            b.email = Request.POST.get('email')\n            b.phone = Request.POST.get('phone')\n            user = User(username=b.username, email=b.email)\n            user.set_password(p)\n            try:\n                user.save()\n                b.save()\n                subject = \"Your Account has been Created : Team Eshop\"\n                message = ' Hello '+str(b.name)+\"\\nThanks for creating a Buyer account with us.\\nYou can now buy our latest products.\\nTeam Eshop\"\n                recipient_list = [b.email]\n                email_from = settings.EMAIL_HOST_USER\n                send_mail(subject, message, email_from, recipient_list)\n                return redirect(\"/login/\")\n            except:  # most likely a duplicate username, since user.save() enforces uniqueness\n                messages.error(Request, \"Username Already Taken\")\n        else:\n            messages.error(\n                Request, \"Password and Confirm Password don't match\")\n    return render(Request, \"signup.html\")\n\n
\n@login_required(login_url='/login')\ndef profilePage(Request):\n    user = User.objects.get(username=Request.user)\n    if user.is_superuser:\n        return redirect('/admin/')\n    else:\n        buyer = Buyer.objects.get(username=user.username)\n        wishlist = Wishlist.objects.filter(user=buyer)\n        orders = Checkout.objects.filter(user=buyer)\n        return render(Request, \"profile.html\", {'user': buyer, 'wishlist': wishlist, 'orders': orders})\n\n
\n@login_required(login_url=\"/login\")\ndef updateProfilePage(Request):\n    if Request.user.is_superuser:\n        return redirect(\"/admin\")\n    else:\n        buyer = Buyer.objects.get(username=Request.user.username)\n        if Request.method == 'POST':\n            buyer.name = Request.POST.get(\"name\")\n            buyer.email = Request.POST.get(\"email\")\n            buyer.phone = Request.POST.get(\"phone\")\n            buyer.addressline1 = Request.POST.get(\"addressline1\")\n            buyer.addressline2 = Request.POST.get(\"addressline2\")\n            buyer.addressline3 = Request.POST.get(\"addressline3\")\n            buyer.pin = Request.POST.get(\"pin\")\n            buyer.city = Request.POST.get(\"city\")\n            buyer.state = Request.POST.get(\"state\")\n            if (Request.FILES.get('pic')):\n                buyer.pic = Request.FILES.get(\"pic\")\n            buyer.save()\n            return redirect(\"/profile\")\n    return render(Request, \"update-profile.html\", {'user': buyer})\n
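\n# The session cart used by the views below is a plain dict keyed by product id\n# (as a string), e.g. {'12': {'name': ..., 'price': ..., 'qty': 2, 'total': ...}};\n# keeping it in Request.session preserves the cart across requests without a\n# dedicated Cart model.\n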
redirect(\"/profile\")\n return render(Request, \"update-profile.html\", {'user': buyer})\n\n\ndef addToCart(Request, id):\n cart = Request.session.get('cart')\n p = Product.objects.get(id=id)\n if cart:\n if str(id) in cart:\n cart[str(id)]['qty'] += 1\n cart[str(id)]['total'] += cart[str(id)]['price']\n else:\n cart.setdefault(str(p.id), {'name': p.name, 'pic': p.pic1.url, 'color': p.color, 'size': p.size, 'price': p.finalprice, 'qty': 1,\n 'total': p.finalprice, 'maincategory': p.maincategory.name, 'subcategory': p.subcategory.name, 'brand': p.brand.name})\n print(cart, \"veer\")\n else:\n cart = {str(p.id): {'name': p.name, 'pic': p.pic1.url, 'color': p.color, 'size': p.size, 'price': p.finalprice, 'qty': 1,\n 'total': p.finalprice, 'maincategory': p.maincategory.name, 'subcategory': p.subcategory.name, 'brand': p.brand.name}}\n Request.session['cart'] = cart\n Request.session.set_expiry(60*60*60*24)\n return redirect(\"/cart-page\")\n\n\ndef cartPage(Request):\n cart = Request.session.get('cart')\n shipping = 0\n total = 0\n final=0\n if cart is None:\n cart = {}\n else:\n for value in cart.values():\n total += value['total']\n if total <= 1000:\n shipping = 150\n final = shipping+total\n return render(Request, 'cart.html', {'cart': cart, 'total': total, 'shipping': shipping, 'final': final})\n\n\ndef deleteCart(Request, id):\n cart = Request.session.get('cart')\n print(cart)\n if str(id) in cart:\n cart.pop(str(id))\n Request.session['cart'] = cart\n return redirect('/cart-page')\n\n\ndef updateCart(Request, id, action):\n cart = Request.session.get('cart')\n if str(id) in cart:\n if action == 'inc':\n cart[str(id)]['qty'] += 1\n cart[str(id)]['total'] += cart[str(id)]['price']\n elif action == 'dec' and cart[str(id)]['qty'] > 1:\n cart[str(id)]['qty'] -= 1\n cart[str(id)]['total'] -= cart[str(id)]['price']\n Request.session['cart'] = cart\n return redirect('/cart-page')\n\n\n@login_required(login_url='/login')\ndef addToWishlist(Request, pid):\n try:\n user = Buyer.objects.get(username=Request.user.username)\n product = Product.objects.get(id=pid)\n try:\n wishlist = Wishlist.objects.get(user=user, product=product)\n except:\n w = Wishlist()\n w.user = user\n w.product = product\n w.save()\n return redirect('/profile')\n except:\n return redirect('/admin')\n\n\ndef deleteWishlist(Request, id):\n print(id, \"\\n\\n\\n\")\n wishlist = Wishlist.objects.get(id=id)\n wishlist.delete()\n return redirect(\"/profile\")\n\n\n@login_required(login_url='/login')\ndef checkOut(Request):\n cart = Request.session.get('cart')\n shipping = 0\n total = 0\n for i in cart.values():\n total += i['total']\n if total < 1000:\n shipping = 150\n final = shipping+total\n try:\n user = Buyer.objects.get(username=Request.user.username)\n return render(Request, 'checkout.html', {'user': user, 'cart': cart, 'shipping': shipping, 'total': total, 'final': final})\n except:\n return redirect('/admin')\n\nclient=razorpay.Client(auth=(RAZOR_KEY_ID,RAZOR_KEY_SECRET))\n@login_required(login_url='/login')\ndef orderPage(Request):\n if Request.method == \"POST\":\n user = Buyer.objects.get(username=Request.user.username)\n cart = Request.session.get('cart')\n if cart:\n total = 0\n shipping = 0\n check = Checkout()\n check.user = user\n for i in cart:\n total += cart[i]['total']\n if total < 1000 and total > 0:\n shipping = 150\n final = shipping+total\n check.shipping = shipping\n check.total = total\n check.final = final\n check.save()\n for i in cart:\n cp = CheckoutProducts()\n cp.checkout = check\n cp.qty = 
@login_required(login_url='/login')\ndef orderPage(Request):\n    if Request.method == \"POST\":\n        user = Buyer.objects.get(username=Request.user.username)\n        cart = Request.session.get('cart')\n        if cart:\n            total = 0\n            shipping = 0\n            check = Checkout()\n            check.user = user\n            for i in cart:\n                total += cart[i]['total']\n            if total < 1000 and total > 0:\n                shipping = 150\n            final = shipping+total\n            check.shipping = shipping\n            check.total = total\n            check.final = final\n            check.save()\n            for i in cart:\n                cp = CheckoutProducts()\n                cp.checkout = check\n                cp.qty = cart[str(i)]['qty']\n                cp.total = cart[str(i)]['total']\n                cp.product = Product.objects.get(id=i)\n                cp.save()\n            subject = \"Your Order has been Placed : Team Eshop\"\n            message = 'Hello '+str(user.name)+\"\\nThanks for shopping with us.\\nYou can now track your order on your profile page.\\nTeam Eshop\"\n            recipient_list = [user.email]\n            email_from = settings.EMAIL_HOST_USER\n            send_mail(subject, message, email_from, recipient_list)\n            Request.session['cart'] = {}\n            if Request.POST.get('mode')!='COD':\n                orderAmount=check.final*100\n                orderCurrency=\"INR\"\n                paymentOrder=client.order.create(dict(amount=orderAmount,currency=orderCurrency,payment_capture=1))\n                paymentId=paymentOrder['id']\n                check.mode=1\n                check.save()\n                return render(Request,'pay.html',{'amount':orderAmount,'api_key':RAZOR_KEY_ID,'order_id':paymentId,'User':user})\n            return redirect('/confirmation')\n        else:\n            return redirect('/cart-page')\n    else:\n        return redirect('/check-out')\n\n
@login_required(login_url='/login/')\ndef paymentSuccess(Request,rppid,rpoid,rpsid):\n    buyer = Buyer.objects.get(username=Request.user)\n    # the Checkout FK field is named user (see profilePage); QuerySets also do not\n    # support negative indexing, so take the latest record with last()\n    check = Checkout.objects.filter(user=buyer).last()\n    check.rppid = rppid\n    check.status = 1\n    check.save()\n    return redirect('/confirmation/')\n\n
@login_required(login_url='/login')\ndef confirmationPage(Request):\n    return render(Request, 'confirmation.html')\n\n
\ndef contactPage(Request):\n    if Request.method == \"POST\":\n        contact = Contact()\n        contact.name = Request.POST.get('name')\n        contact.email = Request.POST.get('email')\n        contact.phone = Request.POST.get('phone')\n        contact.subject = Request.POST.get('subject')\n        contact.message = Request.POST.get('message')\n        contact.save()\n        messages.success(\n            Request, \"Thanks for sharing your query with us! Our team will contact you soon\")\n    return render(Request, 'contact.html')\n\n
\ndef searchPage(Request):\n    if Request.method == \"POST\":\n        maincategory = Maincategory.objects.all()\n        subcategory = Subcategory.objects.all()\n        brand = Brand.objects.all()\n        search = Request.POST.get('search')\n        data = Product.objects.filter(Q(name__icontains=search) | Q(color__icontains=search) | Q(\n            stock__icontains=search) | Q(description__contains=search))\n        return render(Request, 'shop.html', {'maincategory': maincategory, 'subcategory': subcategory, 'brand': brand, 'data': data, 'mc': 'All', 'sc': 'All', 'br': 'All'})\n\n
\ndef forgetUsername(Request):\n    if Request.method == \"POST\":\n        username = Request.POST.get('username')\n        try:\n            user = User.objects.get(username=username)\n            if user.is_superuser:\n                return redirect('/admin')\n            else:\n                buyer = Buyer.objects.get(username=username)\n                Request.session['resetusername'] = username\n                otp = randrange(10000, 99999)\n                buyer.otp = otp\n                buyer.save()\n                subject = \"OTP for Password Reset : Team Eshop\"\n                message = ' OTP for Password Reset is '+str(otp)+\"\\nTeam Eshop\"\n                recipient_list = [buyer.email]\n                email_from = settings.EMAIL_HOST_USER\n                send_mail(subject, message, email_from, recipient_list)\n                return redirect('/enter-otp')\n        except:\n            messages.error(Request, 'Invalid Username, please enter a valid username')\n    return render(Request, 'forget-username.html')\n\n
def enterOtp(Request):\n    if Request.method==\"POST\":\n        otp=Request.POST.get('otp')\n        try:\n            user=Buyer.objects.get(username=Request.session.get('resetusername'))\n            if user.otp==int(otp):\n                return redirect('/forget-password')\n            else:\n                messages.error(Request,\"Invalid OTP\")\n        except:\n            messages.error(Request,\"Unauthorized User\")\n    return render(Request,'enter-otp.html')\n
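\n# Password-reset flow: forgetUsername above emails a 5-digit OTP and remembers\n# the target username in the session; enterOtp verifies it; forgetPassword below\n# then sets the new password on the matching Django auth User.\n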
\ndef forgetPassword(Request):\n    if Request.method==\"POST\":\n        p=Request.POST.get('password')\n        cp=Request.POST.get('cpassword')\n        if p==cp:\n            user=User.objects.get(username=Request.session.get('resetusername'))\n            user.set_password(p)\n            user.save()\n            return redirect('/login')\n        else:\n            messages.error(Request,\"Your Password and Confirm Password do not match\")\n    return render(Request,'forget-password.html')\n\n\n
", "repo_name": "satveer-57/E-Shop", "sub_path": "mainApp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 14537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 85, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 93, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 94, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 97, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 97, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 99, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 99, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 101, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 106, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 106, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 104, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "django.shortcuts.render", 
"line_number": 172, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 181, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 194, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 209, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 211, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 197, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 218, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 233, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 235, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 221, "usage_type": "call"}, {"api_name": "razorpay.Client", "line_number": 237, "usage_type": "call"}, {"api_name": "eshop.settings.RAZOR_KEY_ID", "line_number": 237, "usage_type": "name"}, {"api_name": "eshop.settings.RAZOR_KEY_SECRET", "line_number": 237, "usage_type": "name"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 267, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 267, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 268, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 277, "usage_type": "call"}, {"api_name": "eshop.settings.RAZOR_KEY_ID", "line_number": 277, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 278, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 280, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 282, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 238, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 292, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 284, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 296, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 294, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 308, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 308, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 310, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 321, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 322, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 330, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 330, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 330, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 332, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 336, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 342, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 342, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 343, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 
344, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 346, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 346, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 347, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 355, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 357, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 357, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 359, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 359, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 360, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 367, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 367, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 367, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 370, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 372, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 372, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 373, "usage_type": "call"}]} +{"seq_id": "16479962087", "text": "import torch\nimport torch.nn as nn\nimport numpy as np\nimport scipy.sparse as sp\nimport torch.nn.functional as F\n\nfrom models.hypersagnn import HyperSAGNN\nfrom models.my_gcn_conv import GCNConv\nfrom models.transformer_model import TransformerModel\nfrom utils.gnn_utils import normalize_adj\n\n\nclass HyperTeNet(torch.nn.Module):\n def __init__(self, params, device='cuda:0'):\n super(HyperTeNet, self).__init__()\n self.params = params\n\n # embedding matrices\n self.user_list_item_embeddings = torch.nn.Embedding(params.num_user + params.num_list + params.num_item, params.num_factors)\n self.fc1 = torch.nn.Linear(params.num_factors, 1)\n self.fc2 = torch.nn.Linear(params.num_factors, 1)\n self.fc3 = torch.nn.Linear(params.hid_units[-1], 1)\n self.fc4 = torch.nn.Linear(params.hid_units[-1], 1)\n\n self.user_item_list_dropout = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n self.dropout1 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n self.dropout2 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n self.sigmoid = torch.nn.Sigmoid()\n\n # weight initialization\n ##torch.nn.init.xavier_uniform_(self.user_list_item_embeddings.weight)\n torch.nn.init.xavier_normal_(self.user_list_item_embeddings.weight)\n torch.nn.init.zeros_(self.user_list_item_embeddings.weight[0]) ## check in the successive iterations that this is kept zero\n torch.nn.init.zeros_(self.user_list_item_embeddings.weight[self.params.num_user])\n torch.nn.init.zeros_(self.user_list_item_embeddings.weight[self.params.num_user + self.params.num_list])\n\n # gnn ==========================\n self.user_indices = torch.tensor(np.array(range(params.num_user))).to(device).long()\n self.list_indices = torch.tensor(np.array(range(params.num_list))).to(device).long()\n self.item_indices = torch.tensor(np.array(range(params.num_item))).to(device).long()\n\n self.user_conv1 = GCNConv(params.num_factors, params.hid_units[-2], cached=True, normalize=True,add_self_loops=True) ##normalize=True\n self.user_conv2 = GCNConv(params.hid_units[-2], params.hid_units[-1], 
cached=True, normalize=True,add_self_loops=True)\n\n
        self.list_conv1 = GCNConv(params.num_factors, params.hid_units[-2], cached=True, normalize=True,add_self_loops=True)\n
        self.list_conv2 = GCNConv(params.hid_units[-2], params.hid_units[-1], cached=True, normalize=True,add_self_loops=True)\n\n
        self.item_conv1 = GCNConv(params.num_factors, params.hid_units[-2], cached=True, normalize=True,add_self_loops=True)\n
        self.item_conv2 = GCNConv(params.hid_units[-2], params.hid_units[-1], cached=True, normalize=True,add_self_loops=True)\n\n\n
        if params.args.knn_graph == 'True':\n
            self.user_param_indices = params.dataset_obj.user_edge_index\n
            self.list_param_indices = params.dataset_obj.list_edge_index\n
            self.item_param_indices = params.dataset_obj.item_edge_index\n
            self.user_param_weights, self.list_param_weights, self.item_param_weights = None, None, None ##crucial to note\n
        else:\n
            self.user_adj_mat = params.user_adj_mat.tocoo()\n
            self.user_adj_mat.setdiag(0); self.user_adj_mat.eliminate_zeros()\n
            #pdb.set_trace()\n
            self.user_param_indices = torch.LongTensor(self.user_adj_mat.nonzero()).to(device)\n
            self.user_param_weights = torch.FloatTensor(self.user_adj_mat.data).to(device) ##weight check\n\n
            self.list_adj_mat = params.list_adj_mat.tocoo()\n
            self.list_adj_mat.setdiag(0); self.list_adj_mat.eliminate_zeros()\n
            self.list_param_indices = torch.LongTensor(self.list_adj_mat.nonzero()).to(device)\n
            self.list_param_weights = torch.FloatTensor(self.list_adj_mat.data).to(device) ##weight check\n\n
            self.item_adj_mat = params.item_adj_mat.tocoo()\n
            self.item_adj_mat.setdiag(0); self.item_adj_mat.eliminate_zeros()\n
            self.item_param_indices = torch.LongTensor(self.item_adj_mat.nonzero()).to(device)\n
            self.item_param_weights = torch.FloatTensor(self.item_adj_mat.data).to(device) ##weight check\n
            if params.args.user_adj_weights == 'False':\n
                self.user_param_weights, self.list_param_weights, self.item_param_weights = None, None, None ##crucial to note\n\n
        # dropouts gnn part\n
        self.user_gnn_dropout = torch.nn.Dropout(1.0 - params.gnn_keep_prob) ## keep_prob\n
        self.list_gnn_dropout = torch.nn.Dropout(1.0 - params.gnn_keep_prob) ## keep_prob\n
        self.item_gnn_dropout = torch.nn.Dropout(1.0 - params.gnn_keep_prob) ## keep_prob\n\n
        # seq part ============================================================================\n
        self.pos_embeddings = torch.nn.Embedding(params.max_item_seq_length, params.hid_units[-1])\n\n
        self.user_dropout = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.list_dropout = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout1 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout2 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout3 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout4 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout5 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout6 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.item_dropout7 = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n
        self.pos_dropout = torch.nn.Dropout(1.0 - params.keep_prob) ## keep_prob\n\n
        self.sigmoid_seq = torch.nn.Sigmoid()\n\n
        # transformer model ======================\n
        self.trans_model = TransformerModel(ntoken=params.num_item, ninp=params.hid_units[-1], nhead=params.n_heads[-1],\n
                                            nhid=params.hid_units[-1], nlayers=params.num_layers, dropout=0.3)
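\n        # note: the LayerNorm created on the next line is never applied in this\n        # module's forward pass, so it currently has no effect on the outputs.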
\n        self.layer_norm = nn.LayerNorm(params.hid_units[-1])\n\n
        # hgnn ==================================\n
        self.hypersagnn_model = HyperSAGNN(n_head=params.n_heads[0], d_model=params.hid_units[-1], d_k=params.hid_units[-1], d_v=params.hid_units[-1],\n
                                           node_embedding=self.user_list_item_embeddings,\n
                                           diag_mask=True, bottle_neck=params.hid_units[-1],\n
                                           dropout=1.0-params.net_keep_prob).to(device)\n\n\n\n
    def get_emb_user(self, x, mask=None, get_outlier=None, return_recon = False):\n
        emb = self.user_list_item_embeddings(x)\n
        #emb = self.user_item_list_dropout(emb)\n
        output = emb[:,0] * emb[:,2] #user-item\n
        #output = emb[:,1] * emb[:,2] #list-item\n
        #output = emb[:,0] * emb[:,1] * emb[:,2] #user-list-item\n
        output = self.user_item_list_dropout(output)\n
        #output = self.sigmoid(torch.sum(output,axis=1)) #self.user_item_list_dropout(output)\n
        output = self.sigmoid(self.fc1(output).reshape(-1)) #self.user_item_list_dropout(output)\n
        return output\n\n
    def get_emb_list(self, x, mask=None, get_outlier=None, return_recon = False):\n
        emb = self.user_list_item_embeddings(x)\n
        #emb = self.user_item_list_dropout(emb)\n
        output = emb[:,1] * emb[:,2] #list-item\n
        #output = emb[:,0] * emb[:,1] * emb[:,2] #user-list-item\n
        output = self.user_item_list_dropout(output)\n
        #output = self.sigmoid(torch.sum(output,axis=1)) #self.user_item_list_dropout(output)\n
        output = self.sigmoid(self.fc1(output).reshape(-1)) #self.user_item_list_dropout(output)\n
        return output\n\n
    def get_emb_user_list(self, x, mask=None, get_outlier=None, return_recon = False):\n
        emb = self.user_list_item_embeddings(x)\n
        output_user = emb[:,0] * emb[:,2] #user-item\n
        output_list = emb[:,1] * emb[:,2] #list-item\n
        output_user = self.dropout1(output_user)\n
        output_list = self.dropout2(output_list)\n
        output = self.sigmoid(self.fc1(output_user).reshape(-1) + self.fc2(output_list).reshape(-1)) #self.user_item_list_dropout(output)\n
        return output\n\n
    def get_emb_all_mult(self, x, mask=None, get_outlier=None, return_recon = False):\n
        emb = self.user_list_item_embeddings(x)\n
        output = emb[:,0] * emb[:,1] * emb[:,2] #user-list-item\n
        output = self.dropout1(output)\n
        output = self.sigmoid(self.fc1(output).reshape(-1)) #self.user_item_list_dropout(output)\n
        return output\n\n
    def get_emb_all_mult2(self, x, user_list_item_embeddings, mask=None, get_outlier=None, return_recon = False):\n
        # note: this variant ignores the user_list_item_embeddings argument and\n
        # looks the ids up in self.user_list_item_embeddings instead\n
        emb = self.user_list_item_embeddings(x)\n
        output = emb[:,0] * emb[:,1] * emb[:,2] #user-list-item\n
        output = self.dropout1(output)\n
        output = self.sigmoid(self.fc1(output).reshape(-1)) #self.user_item_list_dropout(output)\n
        return output\n\n
    def get_emb_user_list2(self, x, user_list_item_embeddings, mask=None, get_outlier=None, return_recon = False):\n
        emb = user_list_item_embeddings[x]\n
        output_user = emb[:,0] * emb[:,2] #user-item\n
        output_list = emb[:,1] * emb[:,2] #list-item\n
        output_user = self.dropout1(output_user)\n
        output_list = self.dropout2(output_list)\n
        output = self.sigmoid(self.fc1(output_user).reshape(-1) + self.fc2(output_list).reshape(-1)) #self.user_item_list_dropout(output)\n
        #output = self.fc1(output_user).reshape(-1) + self.fc2(output_list).reshape(-1) #self.user_item_list_dropout(output)\n
        return output\n\n
    def get_emb_user_list3(self, x, user_list_item_embeddings, mask=None, get_outlier=None, return_recon = False):\n
        emb = user_list_item_embeddings[x]\n
        output_user = emb[:,0] * emb[:,2] #user-item\n
        output_list = emb[:,1] * emb[:,2] #list-item\n
        output_user = self.dropout1(output_user)\n
        output_list = 
self.dropout2(output_list)\n output = self.sigmoid(self.fc3(output_user).reshape(-1) + self.fc4(output_list).reshape(-1)) #self.user_item_list_dropout(output)\n return output\n\n def forward(self, user_indices, list_indices, item_indices=None, item_seq=None, item_seq_pos=None, item_seq_neg=None, test_item_indices=None, param5=None, train=True, network='gnn', include_hgnn=False):\n #def forward(self, user_indices, list_indices, item_seq, item_seq_pos=None, item_seq_neg=None, test_item_indices=None, param5=None, train=True,network='seq'):\n\n # gnn_user ==============================\n user_x = self.user_list_item_embeddings(self.user_indices.long())\n user_x = F.relu(self.user_conv1(user_x, self.user_param_indices, self.user_param_weights))\n user_x = self.user_gnn_dropout(user_x)\n user_x = self.user_conv2(user_x, self.user_param_indices, self.user_param_weights)\n\n # gnn_list ==============================\n list_x = self.user_list_item_embeddings(self.params.num_user+self.list_indices.long())\n list_x = F.relu(self.list_conv1(list_x, self.list_param_indices, self.list_param_weights))\n list_x = self.list_gnn_dropout(list_x)\n list_x = self.list_conv2(list_x, self.list_param_indices, self.list_param_weights)\n\n # gnn_item ==============================\n item_x = self.user_list_item_embeddings(self.params.num_user+self.params.num_list+self.item_indices.long())\n item_x = F.relu(self.item_conv1(item_x, self.item_param_indices, self.item_param_weights))\n item_x = self.item_gnn_dropout(item_x)\n item_x = self.item_conv2(item_x, self.item_param_indices, self.item_param_weights)\n\n user_list_item_gnn_emb = torch.cat([user_x, list_x, item_x],dim=0)\n # common part ending ========================================================================\n\n # gnn ===================================\n if network == 'gnn':\n x = torch.cat([user_indices.reshape(-1,1),\n list_indices.reshape(-1,1) + self.params.num_user,\n item_indices.reshape(-1,1) + self.params.num_user + self.params.num_list],\n dim=1)\n\n\n self.edge_probs_gnn = self.get_emb_user_list3(x,user_list_item_gnn_emb)\n\n # hgnn =======================\n if include_hgnn == True:\n self.edge_probs_hgnn = self.hypersagnn_model(x, user_list_item_gnn_emb).reshape(-1)\n self.edge_probs = (self.edge_probs_hgnn + self.edge_probs_gnn)/2\n else:\n self.edge_probs = self.edge_probs_gnn\n ##self.edge_probs = self.edge_probs_hgnn\n ##self.edge_probs = self.edge_probs_gnn\n return self.edge_probs\n\n # seq ===================================\n elif network == 'seq':\n flag_tran = True\n if train == False:\n user_indices = user_indices.reshape(-1,101)[:,0]\n list_indices = list_indices.reshape(-1,101)[:,0]\n item_seq = item_seq.reshape(-1,101,self.params.max_item_seq_length)[:,0,:] ##101\n\n self.user_embeds = user_x[user_indices]\n self.list_embeds = list_x[list_indices]\n self.user_embeds = self.user_dropout(self.user_embeds)\n self.list_embeds = self.list_dropout(self.list_embeds)\n self.mask = (item_seq != 0).float()\n\n self.item_seq_embeds = item_x[item_seq]\n self.item_seq_embeds += (self.user_embeds.reshape(-1,1,self.params.hid_units[-1]) + self.list_embeds.reshape(-1,1,self.params.hid_units[-1]))\n self.item_seq_embeds += self.pos_embeddings.weight ##check this carefullly\n self.item_seq_embeds *= self.mask.reshape(item_seq.shape[0], item_seq.shape[1], 1)\n\n if flag_tran == True:\n self.out_trans = self.trans_model(self.item_seq_embeds.transpose(1,0)).transpose(1,0) ##posemb\n self.item_seq_embeds = self.out_trans\n\n if train == True:\n 
self.item_seq_pos_embeds = item_x[item_seq_pos]\n self.item_seq_neg_embeds = item_x[item_seq_neg]\n self.item_seq_pos_embeds = self.item_dropout3(self.item_seq_pos_embeds)\n self.item_seq_neg_embeds = self.item_dropout4(self.item_seq_neg_embeds)\n self.is_target = (item_seq_pos != 0).float()\n\n self.user_item_seq_pos_embeds = self.user_embeds.reshape(-1,1,self.params.hid_units[-1]) * self.item_seq_pos_embeds\n self.list_item_seq_pos_embeds = self.list_embeds.reshape(-1,1,self.params.hid_units[-1]) * self.item_seq_pos_embeds\n self.item_seq_and_seq_pos_embeds = self.item_seq_embeds * self.item_seq_pos_embeds\n\n self.user_item_seq_neg_embeds = self.user_embeds.reshape(-1,1,self.params.hid_units[-1]) * self.item_seq_neg_embeds\n self.list_item_seq_neg_embeds = self.list_embeds.reshape(-1,1,self.params.hid_units[-1]) * self.item_seq_neg_embeds\n self.item_seq_and_seq_neg_embeds = self.item_seq_embeds * self.item_seq_neg_embeds\n\n self.pos_logits = self.sigmoid(torch.sum(self.user_item_seq_pos_embeds + self.list_item_seq_pos_embeds + self.item_seq_and_seq_pos_embeds, axis=-1))\n self.neg_logits = self.sigmoid(torch.sum(self.user_item_seq_neg_embeds + self.list_item_seq_neg_embeds + self.item_seq_and_seq_neg_embeds, axis=-1))\n\n return self.pos_logits, self.neg_logits, self.is_target\n\n elif train == False:\n self.test_item_embeds = item_x[test_item_indices]\n self.item_seq_embeds = self.item_seq_embeds.view(-1,1,self.params.max_item_seq_length,self.params.hid_units[-1]).repeat(1,101,1,1).view(-1,self.params.max_item_seq_length,self.params.hid_units[-1])\n self.list_embeds = self.list_embeds.view(-1,1,self.params.hid_units[-1]).repeat(1,101,1).view(-1,self.params.hid_units[-1])\n self.user_embeds = self.user_embeds.view(-1,1,self.params.hid_units[-1]).repeat(1,101,1).view(-1,self.params.hid_units[-1])\n\n self.pos_logits = self.sigmoid(torch.sum((self.item_seq_embeds[:,-1,:] + self.list_embeds + self.user_embeds) * self.test_item_embeds,axis=-1))\n return self.pos_logits\n\n", "repo_name": "mvijaikumar/HyperTeNet", "sub_path": "models/hypertenet.py", "file_name": "hypertenet.py", "file_ext": "py", "file_size_in_byte": 17652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "94", "api": [{"api_name": "torch.nn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"torch.nn.Sigmoid", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn.init.zeros_", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn.init.zeros_", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.nn.init.zeros_", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "models.my_gcn_conv.GCNConv", "line_number": 42, "usage_type": "call"}, {"api_name": "models.my_gcn_conv.GCNConv", "line_number": 43, "usage_type": "call"}, {"api_name": "models.my_gcn_conv.GCNConv", "line_number": 45, "usage_type": "call"}, {"api_name": "models.my_gcn_conv.GCNConv", "line_number": 46, "usage_type": "call"}, {"api_name": "models.my_gcn_conv.GCNConv", "line_number": 48, "usage_type": "call"}, {"api_name": "models.my_gcn_conv.GCNConv", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 89, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.nn.Dropout", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn.Sigmoid", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.transformer_model.TransformerModel", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "models.hypersagnn.HyperSAGNN", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 180, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 186, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 192, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 268, "usage_type": "call"}]} +{"seq_id": "30176408998", "text": "import os\nimport time\n\nimport ray\nimport torch\nfrom torch.nn.utils import clip_grad_norm_\nfrom loguru import logger\nfrom pql.models import model_name_to_path\nfrom pql.utils.common import Tracker\nfrom pql.utils.common import load_class_from_path\nfrom pql.utils.common import normalize\nfrom pql.utils.model_util import load_model\n\n\n@ray.remote(num_gpus=0.3)\nclass PQLPLearner:\n def __init__(self, obs_dim, action_dim, cfg):\n self.cfg = cfg\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join([str(i) for i in range(cfg.available_gpus)])\n self.device = torch.device(f\"cuda:{self.cfg.algo.p_learner_gpu}\")\n\n act_class = load_class_from_path(self.cfg.algo.act_class,\n model_name_to_path[self.cfg.algo.act_class])\n self.actor = act_class(self.obs_dim, self.action_dim).to(self.device)\n if self.cfg.artifact is not None:\n load_model(self.actor, \"actor\", cfg)\n self.actor_optimizer = torch.optim.AdamW(self.actor.parameters(), self.cfg.algo.actor_lr)\n self.critic = None\n\n obs_dim = (self.obs_dim,) if isinstance(self.obs_dim, int) else self.obs_dim\n self.memory_size = int(self.cfg.algo.memory_size)\n self.memory = torch.empty((self.memory_size,) + (*obs_dim,), dtype=torch.float32, device=self.device)\n self.next_p = 0\n self.if_full = False\n self.cur_capacity = 0\n\n self.loss_tracker = Tracker(5)\n self.update_count = 
0\n self.normalize_tuple = None\n self.sleep_time = 0.01\n\n def start(self):\n return self.actor, self.update_count, self.loss_tracker.mean()\n\n def learn(self):\n if self.critic is not None:\n indices = torch.randint(self.cur_capacity, size=(self.cfg.algo.batch_size,), device=self.device)\n obs = self.memory[indices].clone()\n if self.cfg.algo.obs_norm:\n obs = normalize(obs, self.normalize_tuple)\n\n self.critic.requires_grad_(False)\n action = self.actor(obs)\n actor_Q = self.critic.get_q_min(obs, action)\n actor_loss = -actor_Q.mean()\n self.optimizer_update(self.actor_optimizer, actor_loss)\n self.critic.requires_grad_(True)\n\n self.update_count += 1\n self.loss_tracker.update(actor_loss.detach().item())\n\n return self.sleep_time\n\n def update(self, critic, obs, normalize_tuple, sleep_time):\n self.critic = critic\n self.sleep_time = sleep_time\n self.normalize_tuple = normalize_tuple\n\n obs_dim = (self.obs_dim,) if isinstance(self.obs_dim, int) else self.obs_dim\n obs = obs.reshape(-1, *obs_dim)\n self.add_capacity = obs.shape[0]\n p = self.next_p + self.add_capacity\n if p > self.memory_size:\n self.memory[self.next_p:self.memory_size] = obs[:self.memory_size - self.next_p]\n p = p - self.memory_size\n self.memory[0:p] = obs[-p:]\n self.if_full = True\n else:\n self.memory[self.next_p:p] = obs\n self.next_p = p # update pointer\n self.cur_capacity = self.memory_size if self.if_full else self.next_p\n\n return self.actor, self.loss_tracker.mean(), self.update_count\n\n def optimizer_update(self, optimizer, objective):\n optimizer.zero_grad(set_to_none=True)\n objective.backward()\n if self.cfg.algo.max_grad_norm is not None:\n grad_norm = clip_grad_norm_(parameters=optimizer.param_groups[0][\"params\"],\n max_norm=self.cfg.algo.max_grad_norm)\n else:\n grad_norm = None\n optimizer.step()\n return grad_norm\n\n\n@ray.remote\ndef asyn_p_learner(p_learner, cfg):\n logger.warning(f\"P-Learner starts running asynchronously on GPU {cfg.algo.p_learner_gpu}\")\n while True:\n sleep_time = ray.get(p_learner.learn.remote())\n time.sleep(sleep_time)\n", "repo_name": "Improbable-AI/pql", "sub_path": "pql/algo/pql_p_learner.py", "file_name": "pql_p_learner.py", "file_ext": "py", "file_size_in_byte": 4006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 43, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 22, "usage_type": "call"}, {"api_name": "pql.utils.common.load_class_from_path", "line_number": 24, "usage_type": "call"}, {"api_name": "pql.models.model_name_to_path", "line_number": 25, "usage_type": "name"}, {"api_name": "pql.utils.model_util.load_model", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.optim.AdamW", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pql.utils.common.Tracker", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "pql.utils.common.normalize", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 91, "usage_type": "call"}, {"api_name": "ray.remote", "line_number": 15, "usage_type": "call"}, {"api_name": "loguru.logger.warning", "line_number": 101, "usage_type": 
"call"}, {"api_name": "loguru.logger", "line_number": 101, "usage_type": "name"}, {"api_name": "ray.get", "line_number": 103, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "ray.remote", "line_number": 99, "usage_type": "attribute"}]} +{"seq_id": "1053027160", "text": "\nfrom asset import *\nfrom config import BRIDGE_URL, INFO_URL\n\nbridge_url = BRIDGE_URL\nbasicInfo = sess.get(INFO_URL).json()\n\n\ndef bridgeAssets(from_asset, to_asset, amount):\n headers = {\"origin\": \"prod\"}\n url = BRIDGE_URL + \"?token={}&srcchain[]={}&dstchain={}&amount={}\".format(from_asset.symbol, from_asset.place, to_asset.place, amount)\n response = GET(url, headers=headers, cachetime=600)\n existing_bridges = set()\n if response and response.status_code == 200:\n bridges = response.json()\n for bridge in bridges:\n bridge_name = bridge[\"bridge\"].lower()\n if bridge_name in existing_bridges or bridge_name in CEXWHITELIST or bridge_name in BRIDGE_BLACKLIST:\n continue\n if to_asset.place != \"StarkNet\": # StarkNet has a different contract address system\n if bridge[\"srctoken_contract\"].lower() != from_asset.address.lower() or bridge[\"dsttoken_contract\"].lower() != to_asset.address.lower():\n continue\n existing_bridges.add(bridge_name)\n website = \"\"\n if bridge_name in basicInfo:\n website = basicInfo[bridge_name][\"url\"]\n bridge_name = basicInfo[bridge_name][\"display_name\"]\n if bridge[\"fee_status\"] == \"ok\":\n Chain2Bridge_ChainEye(from_asset, to_asset, bridge[\"bridge\"], bridge_name, website, False)\n elif bridge[\"fee_status\"] == \"ok_LOAD\" or bridge[\"fee_status\"] == \"LOAD\":\n Chain2Bridge_ChainEye(from_asset, to_asset, bridge[\"bridge\"], bridge_name, website, True)\n\nfor from_chain in chain_assets:\n if from_chain not in CHAINWHITELIST:\n continue\n for to_chain in SUPPORTED_CHAINS:\n if to_chain not in CHAINWHITELIST:\n continue\n if from_chain == to_chain:\n continue\n from_assets = chain_assets[from_chain]\n to_assets = chain_assets[to_chain]\n for from_asset in from_assets:\n amt = 1000 if from_asset.symbol in [\"USDT\", \"BUSD\", \"DAI\", \"USDC\", \"OP\"] else 0.5\n for to_asset in to_assets:\n if from_asset.symbol != to_asset.symbol:\n continue\n bridgeAssets(from_asset, to_asset, amt) # 这里传入跨链Amount\n\ndef load_bridge():\n res = {}\n for from_asset, route in EDGE_iterate(Chain_Asset_Accurate, Chain2Bridge):\n if route.bridge in BRIDGE_BLACKLIST:\n continue\n print(route, end=\" \", flush=True)\n amt = 1000 if route.from_asset.symbol in [\"USDT\", \"BUSD\", \"DAI\", \"USDC\", \"OP\"] else 0.5\n try:\n out, extrafee = route.quote(amt)\n if extrafee<0.01:\n extrafee = 0\n totalfee = amt*get_price(route.from_asset)-out*get_price(route.to_asset) + extrafee\n diff = amt - out if route.from_asset.symbol == route.to_asset.symbol else 0\n print(out, extrafee, \"=\", totalfee)\n res[route] = (diff, totalfee)\n except:\n print(\"bridge failed\")\n return res\n\n\n", "repo_name": "DeFiEye/ETHShanghai-Chaineye", "sub_path": "L2Bridges/bridge.py", "file_name": "bridge.py", "file_ext": "py", "file_size_in_byte": 3050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "config.BRIDGE_URL", "line_number": 5, "usage_type": "name"}, {"api_name": "config.INFO_URL", "line_number": 6, "usage_type": "argument"}, {"api_name": "config.BRIDGE_URL", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "26447940008", "text": "#!/bin/python3\n\nimport json, 
from tapipy import Tapis\n\n
msg: dict = json.loads(os.environ['MSG'])\n\n
# If this is the last actor in the series, just exit\n
if (msg['index'] + 1) == len(msg['tasks']):\n
    exit()\n\n
# Increment Index\n
msg['index'] += 1\n\n
# Construct new Tapis object capable of sending a message to an actor\n
t = Tapis(\n
    base_url = os.environ['_abaco_api_server'],\n
    client_id = msg['tacc_credentials']['client_name'],\n
    access_token = os.environ['_abaco_access_token']\n
)\n\n
# Send the message\n
t.actors.sendMessage(\n
    actor_id=msg['tasks'][msg['index']]['actor_id'],\n
    request_body={\n
        'message': msg\n
    }\n
)\n\n
", "repo_name": "ikewai/climate-automation", "sub_path": "task-base/actor/run_next.py", "file_name": "run_next.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "json.loads", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tapipy.Tapis", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "20869095080", "text": "import pika, threading, time, random\n\n
class ReceiveThread(threading.Thread):\n
\tlock = threading.Lock()\n
\tdef __init__(self, queuename, host):\n
\t\tthreading.Thread.__init__(self)\n
\t\tReceiveThread.lock.acquire()\n
\t\tself.queuename = queuename\n
\t\tself.host = host\n
\t\tReceiveThread.lock.release()\n
\tdef run(self):\n
\t\tconnectiontemp = pika.BlockingConnection(pika.ConnectionParameters(\n
\t\t    host=self.host)) #167.205.32.46\n
\t\tchanneltemp = connectiontemp.channel()\n
\t\tchanneltemp.queue_declare(queue=self.queuename, auto_delete=True)\n
\t\tchanneltemp.basic_qos(prefetch_count=1) # prefetch one message at a time on this thread's own consuming channel\n\n
\t\tdef callback(ch, method, properties, body):\n
\t\t    print(str(body))\n
\t\t    ch.basic_ack(delivery_tag = method.delivery_tag)\n\n
\t\tchanneltemp.basic_consume(callback, queue=self.queuename)\n
\t\tchanneltemp.start_consuming()\n\n\n
def splitstring(command):\n
\tcommandlist = command.split(\" \", maxsplit=1)\n
\treturn commandlist\n\n
def qdeclare_callback(method_frame):\n
\tprint(\"enter callback\")\n
\tglobal nickname\n
\tglobal queuename\n
\tprint (method_frame)\n
\tif (not method_frame): # method_frame is a result from queue_declare:\n
\t\tqueuename = random.choice(rand_nick)\n
\t\tnickname = queuename\n
\t\tchannel.queue_declare(queue=queuename, auto_delete=True, passive=True)\n\n
def setNickName(nickName):\n
\tglobal nickname\n
\tglobal queuename\n
\tif(nickname == \"\"):\n
\t\tqueuename = nickName\n
\t\tprint(queuename)\n
\t\tresult = channel.queue_declare(qdeclare_callback, queue=queuename,passive=True, auto_delete=True)\n
\t\tnickname = queuename\n\n
\t\treceivethread = ReceiveThread(queuename, host)\n
\t\treceivethread.start()\n
\telse:\n
\t\tnickname = nickName\n
\tprint(\"Your nickname is \", nickname)\n\n
def joinChannel(channelname):\n
\tchannel.queue_bind(exchange=exchange, queue=queuename, routing_key=channelname)\n
\tglobal listchannel\n
\tlistchannel += [channelname]\n
\tprint(\"You join \", channelname, \" channel\")\n
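\n# Joining binds this client's personal queue to the shared topic exchange with\n# the channel name as the routing key, so a single queue can receive messages\n# from several channels at once; leaving simply removes that binding.\n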
channelname, \" channel\")\n\ndef sendMessage(message):\n\tif(len(listchannel) > 0):\n\t\tfor channelname in listchannel:\n\t\t\tmessage = \"[\" + channelname + \"] (\" + nickname + \") \" + message\n\t\t\tchannel.basic_publish(exchange=exchange, routing_key=channelname, body=message)\n\t\t\tmessage = \"\"\n\telse:\n\t\tprint(\"No channel\")\n\ndef sendMessageTo(channelname, message):\n\tmessage = \"[\" + channelname + \"] (\" + nickname + \") \" + message\n\tif(len(listchannel) > 0):\n\t\tif(channelname in listchannel):\n\t\t\tchannel.basic_publish(exchange=exchange, routing_key=channelname, body=message) \n\t\telse:\n\t\t\tprint(\"No \", channelname, \" channel\")\n\telse:\n\t\tprint(\"No channel\")\n\ndef exit():\n\tif(not nickname == \"\"):\n\t\tchannel.queue_delete(queue=queuename)\n\tprint(\"Good bye\")\n\tconnection.close()\n\traise SystemExit \n\ndef callMethod(command):\t\t\n\tcommandlist = splitstring(command)\n\tif(commandlist[0] == \"/NICK\"):\n\t\tsetNickName(commandlist[1])\n\telse:\n\t\tif(commandlist[0] == \"/EXIT\"):\t\n\t\t\texit()\n\t\telse:\n\t\t\tif(not nickname == \"\"):\n\t\t\t\tif(commandlist[0] == \"/JOIN\"):\n\t\t\t\t\tjoinChannel(commandlist[1])\n\t\t\t\telif(commandlist[0] == \"/LEAVE\"):\n\t\t\t\t\tleaveChannel(commandlist[1])\n\t\t\t\telse :\n\t\t\t\t\tif(commandlist[0].startswith('@')):\n\t\t\t\t\t\tif(len(commandlist) > 1):\n\t\t\t\t\t\t\tsplitting = commandlist[0].split(\"@\", maxsplit=1)\n\t\t\t\t\t\t\tsendMessageTo(splitting[1], commandlist[1])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(not command == \"\"):\n\t\t\t\t\t\t\tsendMessage(command)\n\t\t\telse:\n\t\t\t\tprint(\"Set your nickname first\")\n\ndef sendthread():\n\tcommandtemp = input()\n\tcallMethod(commandtemp)\n\ndef main():\n\tglobal connection\n\tconnection = pika.BlockingConnection(pika.ConnectionParameters(\n\t host=host))\n\tglobal channel\n\tchannel = connection.channel()\n\tchannel.exchange_declare(exchange=exchange, exchange_type=\"topic\", auto_delete=True)\n\n\twhile(True):\n\t\tsendthread()\n\nnickname = \"\"\nqueuename = \"\"\nlistchannel = []\nhost = \"localhost\" # \"167.205.32.46\"\nconnection = None\nchannel = None\nreceivethread = None\nexchange = \"13512023__\"\nrand_nick = ['af', 'ik', 'jun', 'ita', 'sina', 'bela']\n\nmain()\n", "repo_name": "afik/IRC", "sub_path": "rabbitmq/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 4118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "threading.Thread", "line_number": 3, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 4, "usage_type": "call"}, {"api_name": "threading.Thread.__init__", "line_number": 6, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pika.BlockingConnection", "line_number": 12, "usage_type": "call"}, {"api_name": "pika.ConnectionParameters", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 36, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 127, "usage_type": "call"}, {"api_name": "pika.ConnectionParameters", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "36413141862", "text": "import os\nimport colorsys\nimport time\nimport json\nfrom tkinter import *\nfrom tkinter import ttk, filedialog\nfrom PIL import Image, ImageTk\n\nclass Anota:\n '''\n App to annotate image data.\n '''\n class Box:\n '''\n Creates a bounding box with specified:\n - vertices\n 
            - label\n
            - color\n
            '''\n
        def __init__(self, master, sx, sy, ex, ey, label, color):\n
            '''\n
            Constructs the box, label and box bounding the label on the canvas.\n
            master : (tkinter.Canvas)\n
            sx, sy : starting vertex (int)\n
            ex, ey : ending vertex (int)\n
            label : label for the box (string)\n
            color : hex color code (string)\n
            '''\n
            self.__master = master\n
            self.start = self.get_real(sx, sy)\n
            end = self.get_real(ex, ey)\n
            self.id = master.create_rectangle(self.start + end, outline = color, width = 3)\n
            x, y = master.bbox(self.id)[:2]\n
            self.__text = master.create_text(x+2, y+2, text = label, fill = '#000000', font = ('Courier', 8), anchor = 'nw')\n
            self.__bound = master.create_rectangle(master.bbox(self.__text), fill = color, width = 0)\n
            master.lift(self.__text)\n
        \n
        def destroy(self):\n
            '''\n
            Destroys the box, the label and box bounding \n
            the label from the canvas.\n
            '''\n
            for item in (self.id, self.__text, self.__bound):\n
                self.__master.delete(item)\n
        \n
        def get_real(self, x, y):\n
            '''\n
            Translates the coordinates from space with canvas' size\n
            onto absolute space and force not to go beyond the\n
            absolute lower and upper bounds.\n
            x : x-coordinate on the canvas space (int)\n
            y : y-coordinate on the canvas space (int)\n
            \n
            Returns -\n
            Tuple of corrected coordinates (x, y) (int)\n
            \n
            (With larger images fitted on the fixed canvas,\n
            and the behaviour of Tk canvas that coordinates\n
            are drawn relative to the canvas space, doing stuffs\n
            can be (are) quite drunkening. The function was made\n
            to save the doer from hell like that.)\n
            '''\n
            min_w, min_h, max_w, max_h = self.__master.bbox('image')\n
            \n
            y = (y < min_h) * (min_h) + (min_h <= y < max_h) * y + (y >= max_h) * (max_h-1)\n
            x = (x < min_w) * (min_w) + (min_w <= x < max_w) * x + (x >= max_w) * (max_w-1)\n
            \n
            return x, y\n
        \n
        def info(self):\n
            '''\n
            Constructs information about the bounding box\n
            on the correct space.\n
            \n
            Returns - \n
            List of:\n
            - centre on x direction (float)\n
            - centre on y direction (float)\n
            - width (float)\n
            - height (float)\n
            all relative to the actual image space,\n
            in [0, 1], so any image size is handled alike.\n
            e.g. a box from (64, 48) to (192, 144) on a\n
            640x480 image gives [0.2, 0.2, 0.2, 0.2].\n
            '''\n
            min_w, min_h, max_w, max_h = self.__master.bbox('image')\n
            x1, y1, x2, y2 = self.__master.bbox(self.id)\n
            width = abs(x1 - x2) / (max_w - min_w)\n
            if width > 1: width = 1\n
            height = abs(y1 - y2) / (max_h - min_h)\n
            if height > 1: height = 1\n
            centerx = (x1 + x2) / (2 * (max_w - min_w))\n
            centery = (y1 + y2) / (2 * (max_h - min_h))\n
            return [round(i,4) for i in (centerx, centery, width, height)]\n
        \n
        def update(self, new_end_x, new_end_y, make_square = False):\n
            '''\n
            Updates the ending vertex to the current cursor\n
            position, corrected onto the image space.\n
            new_end_x : x part of the new ending vertex (int)\n
            new_end_y : y part of the new ending vertex (int)\n
            make_square : flag to force the box to be drawn as a square (boolean)\n
            '''\n
            sx, sy = self.start\n
            ex, ey = self.get_real(new_end_x, new_end_y)\n
            if make_square:\n
                if sx < ex: ex = sx + abs(sy - ey)\n
                else: ex = sx - abs(sy - ey)\n
                ex, ey = self.get_real(ex, ey)\n
                if sy < ey: ey = sy + abs(sx - ex)\n
                else: ey = sy - abs(sx - ex)\n
            self.__master.coords(self.id, (sx, sy, ex, ey))\n
            \n
            x, y = self.__master.bbox(self.id)[:2]\n
            self.__master.coords(self.__text, x+2, y+2)\n
            self.__master.coords(self.__bound, self.__master.bbox(self.__text))\n
        \n
    class Point:\n
        '''\n
        Creates a point with specified:\n
        - x, y coordinate\n
        - label\n
        - color\n
        '''\n
        def __init__(self, master, x, y, name, color):\n
            '''\n
            Constructs the point, label and box bounding the label on the canvas.\n
            
master : (tkinter.Canvas)\n x, y : point (int)\n label : label for the box (string)\n color : hex color code (string)\n '''\n self.__master = master\n x, y = self.get_real(x, y)\n self.id = master.create_oval(x, y, x+4, y+4, fill = color, outline = '#ffffff', width = 1)\n x, y = master.bbox(self.id)[:2]\n self.__text = master.create_text(x, y+8, text = name, fill = '#000000', font = ('Courier', 8), anchor = 'ne')\n self.__bound = master.create_rectangle(master.bbox(self.__text), fill = color, width = 0)\n master.lift(self.__text)\n \n def destroy(self):\n '''\n Destroys the box, the label and box bounding \n the label from the canvas.\n '''\n for item in (self.id, self.__text, self.__bound):\n self.__master.delete(item)\n \n def get_real(self, x, y):\n '''\n Translates the coordinates from space with canvas' size\n onto absolute space and force not to go beyond the\n absolute lower and upper bounds.\n x : x-coordinate on the canvas space (int)\n y : y-coordinate on the canvas space (int)\n \n Returns -\n Tuple of corrected coordinates (x, y) (int)\n \n (With larger images fitted on the fixed canvas,\n and the behaviour of Tk canvas that coordinates\n are drawn relative to the canvas space, doing stuffs\n can be (are) quite drunkening. The function was made\n to save the doer from hell like that.)\n '''\n max_w = self.__master.bbox('image')[2]\n max_h = self.__master.bbox('image')[3]\n \n abs_x = int(self.__master.xview()[0] * max_w + x)\n abs_y = int(self.__master.yview()[0] * max_h + y)\n \n return abs_x, abs_y\n \n def info(self):\n '''\n Constructs information about the point\n on the space.\n \n Returns - \n List of:\n - coordinate on x direction (float)\n - coordinate on y direction (float)\n with relative to the actual image space\n in [0, 1] to adapt various sizes.\n '''\n x, y, _, _ = self.__master.bbox(self.id)\n max_w = self.__master.bbox('image')[2]\n max_h = self.__master.bbox('image')[3]\n x = x / max_w\n y = y / max_h\n return [round(x, 4), round(y, 4)]\n \n def __init__(self, root):\n '''\n Take in the root and builds the app up on that.\n '''\n # Configures the root.\n self.root = root\n self.root.title('Anota')\n self.root.resizable(False, False)\n \n # Configures style.\n self.currentTheme = 'light'\n self.style = ttk.Style(self.root)\n try:\n self.root.tk.call('source', 'Azure/azure-dark/azure-dark.tcl')\n self.root.tk.call('source', 'Azure/azure/azure.tcl')\n self.styleDict = {'light': 'azure',\n 'dark' : 'azure-dark'}\n self.bg = {'light': '#ffffff', 'dark': '#333333'}\n self.fg = {'light': '#000000', 'dark': '#ffffff'}\n except: \n self.styleDict = {'light': 'default',\n 'dark' : 'default'}\n self.bg = {'light': '#ffffff', 'dark': '#000000'}\n self.fg = {'light': '#000000', 'dark': '#ffffff'}\n self.style.theme_use(self.styleDict[self.currentTheme])\n \n # Sets up file communication system.\n self.directoryPath = StringVar()\n self.fileIndex = -1\n \n # Sets up class/label and color system\n self.numClasses = 0\n self.classList = list()\n self.colorDict = dict()\n self.currentClass = StringVar()\n \n # Sets up labelling modes\n self.availModes = ['Bounding Box', 'Landmark']\n self.drawDict = {'Bounding Box': self.drawBB, 'Landmark': self.drawLM}\n self.currentMode = StringVar()\n \n # Sets up labelling schemes\n self.availSchemes = ['Classic', 'Extended', 'Hierarchical']\n self.relationGraph = dict() # required in Hierarchical scheme\n self.currentScheme = StringVar()\n \n # Declare data\n self.data = {'annotation-type': str(), \n 'annotation-scheme': str(), \n 
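    \n    # For reference (illustrative values, not from the original file), a filled\n    # self.data ends up shaped like:\n    # {'annotation-type': 'Bounding Box', 'annotation-scheme': 'Classic',\n    #  'labels': ['cat', 'dog'], 'relation-graph': {},\n    #  'annotations': {'img1.png': [[0, 0.5, 0.5, 0.25, 0.25]]}}\n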
    \n    def MainFrame(self):\n        '''\n        Creates the individual parts and positions them.\n        '''\n        self.mainframe = ttk.Frame(self.root) # main frame\n        \n        self.canvframe = ttk.Frame(self.mainframe) # drawing area\n        self.logframe = ttk.LabelFrame(self.mainframe, text = 'Log') # log\n        self.detailframe = ttk.LabelFrame(self.mainframe, text = 'Details') # label choosing area\n        self.configframe = ttk.LabelFrame(self.mainframe, text = 'Configuration') # settings area\n        self.fileframe = ttk.LabelFrame(self.mainframe, text = 'Files') # file system area\n        \n        # Positioning\n        self.mainframe.grid(row = 0, column = 0)\n        self.canvframe.grid(row = 0, column = 1, rowspan = 3, pady = 20, sticky = W)\n        self.logframe.grid(row = 0, column = 2, padx = 20, pady = (20, 10), sticky = W)\n        self.detailframe.grid(row = 0, column = 0, rowspan = 3, padx = 20, pady = 20, sticky = W)\n        self.configframe.grid(row = 2, column = 2, padx = 20, pady = (10, 20), sticky = W)\n        self.fileframe.grid(row = 1, column = 2, padx = 20, pady = 10, sticky = W)\n        \n        self.LogFrame()\n        self.FileFrame()\n        self.PrepareCanvas()\n        self.DetailFrame()\n        self.ConfigFrame()\n    \n    def PrepareCanvas(self):\n        '''\n        Prepares the drawing area.\n        '''\n        master = self.canvframe\n        \n        self.currentLabel = ttk.Label(master, text = '', font = (None, 11)) # shows the current label string\n        self.colorPatch = Canvas(master, width = 100, height = 5, relief = 'flat', bg = self.bg[self.currentTheme], highlightthickness = 0, bd = 0) # color indicator\n        self.canvCanvas = Canvas(master, width = 640, height = 480, relief = 'flat', bg = self.bg[self.currentTheme], highlightthickness = 0, bd = 0) # the actual drawing canvas\n        \n        # Positioning\n        self.currentLabel.grid(row = 0, column = 0, pady = 10, sticky = W)\n        self.colorPatch.grid(row = 1, column = 0, pady = 10, sticky = (N, E, W, S))\n        self.canvCanvas.grid(row = 3, column = 0, pady = 10, sticky = (N, E, W, S))\n    \n    def CanvasFrame(self):\n        '''\n        Manages the actual drawing canvas.\n        Called at every change of file.\n        '''\n        master = self.canvframe\n        try: self.clearFrame(self.canvCanvas) # to avoid a confusing view and memory leaks\n        except: pass\n        try: filepath = self.directoryPath.get()+'/'+self.currentFile() # to ensure a file is called\n        except: return\n        self.data['annotations'][self.currentFile()] = [] # note: re-opening a file resets its recorded annotations\n        self.stuffLog = [] # container for every drawing on the current canvas\n        self.clearLog(self.logBox) # to avoid confusion\n        img = Image.open(filepath)\n        \n        w, h = img.size\n        if w > h: img = img.resize((640, int(h/w*640)))\n        elif w < h: img = img.resize((int(w/h*480), 480))\n        else: img = img.resize((480, 480))\n        \n        tkimg = ImageTk.PhotoImage(img)\n        master.image = tkimg # don't know why this has to be done - just don't touch\n        self.canvCanvas.create_image(320, 240, anchor = 'center', image = tkimg, tags = 'image') # fits the image on the canvas\n    \n    def LogFrame(self):\n        '''\n        Briefly reports to the user what has been done.\n        '''\n        master = self.logframe\n        \n        indiLabel = ttk.Label(master, text = '>') # to look cool\n        self.logBox = Text(master, font = ('Courier', 10),\n                           width = 32, height = 10, \n                           relief = 'flat', state = 'disabled', \n                           background = self.bg[self.currentTheme], foreground = self.fg[self.currentTheme],\n                           highlightthickness = 0)\n        \n        indiLabel.grid(row = 0, column = 0, padx = (20, 0), pady = 10, sticky = N)\n        self.logBox.grid(row = 0, column = 1, padx = 20, pady = 10, rowspan = 2, sticky = (N, E, W, S))\n    \n    def FileFrame(self):\n        '''\n        Creates action points to help the user\n        work with the files.\n        '''\n        master = self.fileframe\n        \n        direLabel = ttk.Label(master, text = 'Current Directory :')\n        self.sldiLabel = ttk.Label(master, text = 'None')\n        fileLabel = ttk.Label(master, text = 'Current File :')\n        self.slfiLabel = ttk.Label(master, text = 'None')\n        chfiButton = ttk.Button(master, text = 'Choose Directory', command = self.chooseDirectory)\n        commands = [self.prevFile, self.CanvasFrame, self.bindCanvas]\n        prevButton = ttk.Button(master, text = 'Previous', command = self.commit(commands))\n        commands = [self.nextFile, self.CanvasFrame, self.bindCanvas]\n        nextButton = ttk.Button(master, text = 'Next', command = self.commit(commands), style = 'AccentButton')\n        \n        # positioning\n        direLabel.grid(row = 0, column = 0, padx = 20, pady = 10, sticky = W)\n        self.sldiLabel.grid(row = 0, column = 1, padx = 20, pady = 10, sticky = E)\n        fileLabel.grid(row = 1, column = 0, padx = 20, pady = 10, sticky = W)\n        self.slfiLabel.grid(row = 1, column = 1, padx = 20, pady = 10, sticky = E) \n        chfiButton.grid(row = 2, column = 0, columnspan = 2, padx = 20, pady = 10, sticky = (N, E, W, S))\n        prevButton.grid(row = 3, column = 0, padx = 20, pady = 10, sticky = (N, E, W, S))\n        nextButton.grid(row = 3, column = 1, padx = 20, pady = 10, sticky = (N, E, W, S))\n    \n    def DetailFrame(self):\n        '''\n        Creates choice points (radio-buttons/check-buttons)\n        to help the user choose a label string to work with.\n        '''\n        # ------ Local helper functions down here ------\n        def processChoice(index):\n            '''\n            After the user makes a choice, \n            - the actual label (a single label in Classic\n              and multiple labels in non-Classic) needs to be \n              carefully assembled.\n            - the respective color needs to be shown.\n            The function takes care of both.\n            '''\n            # Assemble the actual label\n            if self.currentScheme.get() != 'Classic':\n                label = []\n                for i in range(index+1):\n                    if self.labelarr[i].get():\n                        self.currentClass.set(self.classList[i].get())\n                        label.append(self.classList[i].get())\n                label = '-'.join(label)\n                \n                # Enable/Disable buttons in the Hierarchical scheme\n                if self.currentScheme.get() == 'Hierarchical':\n                    guide_buttons(index)\n                \n            else:\n                label = self.currentClass.get()\n            \n            try: # Ensures a valid label is chosen\n                self.currentLabel.configure(text = label)\n                self.colorPatch.configure(background = self.colorDict[self.currentClass.get()])\n            except: # If not, hide the label and color panel\n                self.currentLabel.configure(text = '')\n                self.colorPatch.configure(background = self.bg[self.currentTheme])\n        \n        def find_parent(node_index):\n            '''Finds the parent of the given node index in the relationGraph.'''\n            for parent, kids in self.relationGraph.items():\n                if node_index in kids: \n                    return str(parent) if parent != 'root' else parent\n        \n        def find_siblings(node_index):\n            '''Finds the siblings of the given node index in the relationGraph.'''\n            return self.relationGraph[find_parent(node_index)]\n        \n        def find_children(node_index):\n            '''Finds the children of the given node index in the relationGraph.'''\n            try: return self.relationGraph[str(node_index)]\n            except: return []\n        \n        def switch_state(button):\n            '''Switches the state of buttons.'''\n            if str(button['state']) == 'normal':\n                button.configure(state = 'disabled')\n            else: button.configure(state = 'normal')\n
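        \n        # Illustrative relationGraph shape (hypothetical labels): with\n        # classList = ['animal', 'cat', 'dog'] and 'animal' parenting the others,\n        # the graph is {'root': [0], '0': [1, 2]} - keys are stringified parent\n        # indices (or 'root'), values are lists of child indices.\n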
        \n        def guide_buttons(node_index):\n            '''Manages buttons -\n            if the clicked button is checked\n                - shuts down the parent and the siblings\n                - turns on the children\n            else\n                - does the opposite'''\n            parent = find_parent(node_index)\n            if parent != 'root':\n                switch_state(self.buttons[int(parent)])\n            for i in find_siblings(node_index):\n                if i == node_index: continue\n                switch_state(self.buttons[i])\n            for i in find_children(node_index):\n                switch_state(self.buttons[i])\n        # ------ Local helper functions end here -----\n        \n        master = self.detailframe\n        self.clearFrame(master)\n        \n        self.slscLabel = ttk.Label(master, text = 'Scheme\\t: '+self.currentScheme.get())\n        self.slmoLabel = ttk.Label(master, text = 'Mode\\t: '+self.currentMode.get())\n        \n        labelselectCanvas = Canvas(master, width = 200, height = 450, relief = 'flat', bd = 0, highlightthickness = 0, bg = self.bg[self.currentTheme])\n        labels = self.getValues(self.classList)\n        \n        if self.currentScheme.get() == 'Classic': # ensures only ONE label is chosen in Classic\n            self.buttons = [ttk.Radiobutton(labelselectCanvas,\n                                            text = labels[i],\n                                            variable = self.currentClass,\n                                            value = labels[i]) for i in range(self.numClasses)]\n        else: # allows multiple label choices in non-Classic\n            self.labelarr = [IntVar(value = 0) for _ in range(self.numClasses)]\n            self.buttons = [ttk.Checkbutton(labelselectCanvas,\n                                            text = labels[i],\n                                            variable = self.labelarr[i],\n                                            onvalue = 1, \n                                            offvalue = 0) for i in range(self.numClasses)]\n        \n        if self.currentScheme.get() == 'Hierarchical': # shuts down non-root buttons\n            for key, item in self.relationGraph.items():\n                if key != 'root':\n                    for i in item:\n                        self.buttons[i]['state'] = 'disabled'\n        \n        for i in range(self.numClasses): # positions the buttons and binds them with action functions\n            self.buttons[i].configure(command = lambda index = i: processChoice(index))\n            labelselectCanvas.create_window((0, i*35), window = self.buttons[i], anchor = NW)\n        \n        # Scrollbars\n        sby = ttk.Scrollbar(master, orient = VERTICAL, command = labelselectCanvas.yview)\n        sbx = ttk.Scrollbar(master, orient = HORIZONTAL, command = labelselectCanvas.xview)\n        labelselectCanvas.config(xscrollcommand = sbx.set, yscrollcommand = sby.set, scrollregion = labelselectCanvas.bbox('all'))\n        \n        # Key bindings\n        labelselectCanvas.bind('<Button-4>', lambda _: labelselectCanvas.yview_scroll(1, 'units'))\n        labelselectCanvas.bind('<Button-5>', lambda _: labelselectCanvas.yview_scroll(-1, 'units'))\n        \n        # Positioning\n        labelselectCanvas.grid(row = 2, column = 0, padx = (20, 10), pady = 10, sticky = (N, E, W, S))\n        self.slscLabel.grid(row = 0, column = 0, padx = (20, 10), pady = 10, sticky = W)\n        self.slmoLabel.grid(row = 1, column = 0, padx = (20, 10), pady = 10, sticky = W)\n        sby.grid(row = 2, column = 1, pady = 20, sticky = (N, S))\n        sbx.grid(row = 3, column = 0, padx = 20, sticky = (E, W))\n    \n    def ConfigFrame(self):\n        '''\n        Creates configuration buttons to help\n        the user configure the app.\n        '''\n        master = self.configframe\n        \n        saveButton = ttk.Button(master, text = 'Save', command = self.save, style = 'AccentButton')\n        darkSwitch = ttk.Checkbutton(master, text = 'Switch Mode', style = 'Switch', command = self.switchMode)\n        settButton = ttk.Button(master, text = 'Settings', command = self.SettingsFrame)\n        \n        # Positioning\n        saveButton.grid(row = 0, column = 0, padx = 25, pady = 10, sticky = (N, E, W, S))\n        darkSwitch.grid(row = 0, column = 1, padx = 25, pady = 10, sticky = (N, E, W, S))\n        settButton.grid(row = 1, column = 0, columnspan = 2, padx = 25, pady = 10, sticky = (N, E, W, S))\n    \n    def SettingsFrame(self):\n        '''\n        Creates panels that guide the user through setting up their work.\n        '''\n        try: self.settings.lift() # ensures not to be called more than ONCE\n        except:\n            self.settings = Toplevel(self.root, background = self.bg[self.currentTheme])\n            self.settings.title('AnoDatum - Settings')\n            self.settings.resizable(False, False)\n            \n            self.modeFrame = ttk.LabelFrame(self.settings, text = 'Mode')\n            self.defineFrame = ttk.LabelFrame(self.settings, text = 'Define Labels')\n            self.losaFrame = ttk.LabelFrame(self.settings, text = 'Configuration')\n            \n            # Positioning\n            self.modeFrame.grid(row = 0, column = 0, padx = 20, pady = 20, sticky = W)\n            self.defineFrame.grid(row = 0, column = 1, padx = 20, pady = 20, rowspan = 3, sticky = W)\n            self.losaFrame.grid(row = 2, column = 0, padx = 20, pady = 20, sticky = W)\n\n            self.ModeFrame()\n            self.LoadSaveFrame()\n    \n    def ModeFrame(self):\n        '''\n        Panel that helps the user choose a mode and a scheme.\n        Currently supports:\n        - Bounding Box\n        - Landmark\n        '''\n        master = self.modeFrame\n        modeLabel = ttk.Label(master, text = 'Choose Mode :')\n        modeBox = ttk.Combobox(master, state = 'readonly', textvariable = self.currentMode, values = self.availModes)\n        schemeLabel = ttk.Label(master, text = 'Choose Scheme :')\n        schemeBox = ttk.Combobox(master, state = 'readonly', textvariable = self.currentScheme, values = self.availSchemes)\n        comoButton = ttk.Button(master, text = 'Set', style = 'AccentButton', command = self.DefineFrame)\n        \n        modeBox.set(self.availModes[0])\n        schemeBox.set(self.availSchemes[0])\n\n        # Positioning\n        modeLabel.grid(row = 0, column = 0, padx = 20, pady = 10, sticky = W)\n        modeBox.grid(row = 0, column = 1, padx = 20, pady = 10)\n        schemeLabel.grid(row = 1, column = 0, padx = 20, pady = 10, sticky = W)\n        schemeBox.grid(row = 1, column = 1, padx = 20, pady = 10)\n        comoButton.grid(row = 2, column = 0, padx = 20, pady = 10, columnspan = 2, sticky = (N, E, W, S))\n    \n    def DefineFrame(self):\n        '''\n        Creates a treeview that helps the user\n        add, define/name and remove labels.\n        '''\n        # ----- Local helper functions down here -----\n        def add_node(tree):\n            '''\n            Adds a new label to the classList and shows it in the treeview.\n            '''\n            self.numClasses += 1 # gives the guy a place\n            self.setUpClassList()\n            index = self.numClasses-1 # gets the index of the last item\n            tree.insert('', END, text = self.classList[index].get())\n        \n        def add_sibling(tree):\n            '''\n            Adds a new sibling label of the clicked item to the classList and shows it in the treeview.\n            '''\n            self.numClasses += 1 # gives the guy a place\n            self.setUpClassList()\n            index = self.numClasses - 1 # gets the index of the last item in classList\n            parent = tree.parent(tree.focus()) # gets the parent of the focused item in the tree\n            tree.insert(parent, END, text = str(self.classList[index].get()))\n            self.buildGraph(parent, self.relationGraph, tree)\n        \n        def add_child(tree):\n            '''\n            Adds a new child label of the clicked item to the classList and shows it in the treeview.\n            '''\n            self.numClasses += 1 # gives the guy a place\n            self.setUpClassList()\n            index = self.numClasses - 1 # gets the index of the last item in classList\n            parent = tree.focus() # parent is the clicked item\n            tree.insert(parent, END, text = str(self.classList[index].get())) # the new child is added under the clicked item\n            self.buildGraph(parent, self.relationGraph, tree)\n
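        \n        # Treeview item ids are 1-based hex with an 'I' prefix (see index_to_item\n        # below): classList index 0 <-> item 'I001' and index 15 <-> item 'I010'.\n        # delete_node() and edit() rely on exactly this mapping holding.\n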
        \n        def delete_node(tree):\n            '''\n            Deletes a label node from the tree and the classList.\n            Deleting a node with children will also delete\n            its children. {Implement this!}\n            '''\n            try: # ensures to take action only when something is selected\n                index = int(tree.focus().replace('I',''), 16)-1 # retrieves the decimal index from the hexadecimal id returned by the tree's focus, to be used in classList\n                parent = tree.parent(tree.focus()) # keeps the parent of the focused item for later use\n                self.classList.pop(index) # deletes the label and its children from the classList\n                tree.delete(tree.focus()) # deletes the item from the tree\n                self.numClasses = len(self.classList) # re-syncs numClasses with the classList length\n                self.buildGraph(parent, self.relationGraph, tree) # uses the parent here\n            except: pass\n        \n        def edit(tree):\n            '''\n            Creates a Toplevel and allows the user to rename the label they clicked.\n            + Press Enter to finish +\n            '''\n            try: clicked_index = int(tree.focus().replace('I',''), 16) - 1\n            except: return\n            temp = Toplevel(self.root, background = self.bg[self.currentTheme])\n            temp.title('Edit Label Name')\n            temp.resizable(False, False)\n            \n            temp_entry = ttk.Entry(temp, textvariable = self.classList[clicked_index])\n            # replaces the label of the treeview item currently on focus with the value of the entry\n            change = lambda: tree.item(tree.focus(), text = self.classList[clicked_index].get())\n            commands = [change, temp.destroy]\n            conf_button = ttk.Button(temp, text = 'Confirm', command = self.commit(commands))\n            \n            temp_entry.bind('<Return>', lambda _: self.commit(commands)())\n            \n            # Positioning\n            temp_entry.grid(row = 0, column = 0, padx = 20, pady = (20, 10))\n            conf_button.grid(row = 1, column = 0, padx = 20, pady = (10, 20), sticky = (N, E, W, S))\n        \n        def index_to_item(index):\n            '''\n            Converts integer indices into the\n            hex string indices used by the treeview.\n            '''\n            num = hex(index+1).replace('0x', '').upper()\n            return 'I'+'0'*(3-len(num))+num\n        # ----- Local helper functions end here -----\n        \n        master = self.defineFrame\n        self.clearFrame(master)\n        \n        declTree = ttk.Treeview(master, selectmode = 'browse', height = 8) \n        declTree.column('#0', width = 300, stretch = False)\n        declTree.heading('#0', text = 'Labels')\n        \n        # tries to build the treeview from the classList if it exists\n        if self.currentScheme.get() in ['Classic', 'Extended']:\n            for item in self.getValues(self.classList):\n                declTree.insert('', END, text = item)\n            labelButton = ttk.Button(master, text = 'Add Label', command = lambda : add_node(declTree))\n        else:\n            self.buildGraph('', self.relationGraph, declTree)\n            for key, indices in self.relationGraph.items():\n                if key == 'root': key = ''\n                else: key = index_to_item(int(key))\n                for index in indices:\n                    declTree.insert(key, index, text = self.classList[index].get())\n            siblingButton = ttk.Button(master, text = 'Add Sibling', command = lambda : add_sibling(declTree))\n            childButton = ttk.Button(master, text = 'Add Child', command = lambda : add_child(declTree))\n        \n        deleteButton = ttk.Button(master, text = 'Delete', command = lambda : delete_node(declTree))\n        \n        # Scrollbars\n        sby = ttk.Scrollbar(master, orient = VERTICAL, command=declTree.yview)\n        sbx = ttk.Scrollbar(master, orient = HORIZONTAL, command=declTree.xview)\n        \n        declTree.config(xscrollcommand = sbx.set, yscrollcommand = sby.set)\n        \n        # Positioning\n        declTree.grid(row = 0, column = 0, columnspan = 3, padx = (20, 0), pady = 10, sticky = (N, E, W, S))\n        if self.currentScheme.get() in ['Classic', 'Extended']:\n            labelButton.grid(row = 2, column = 1, padx = 10, pady = (10, 20))\n        else:\n            siblingButton.grid(row = 2, column = 0, padx = (20, 10), pady = (10, 20))\n            childButton.grid(row = 2, column = 1, padx = 10, pady = (10, 20))\n        deleteButton.grid(row = 2, column = 2, padx = (10, 20), pady = (10, 20))\n        sby.grid(row=0, column=3, padx = 10, pady = 20, sticky = 'ns')\n        sbx.grid(row=1, column=0, columnspan = 3, padx = 20, sticky = 'ew')\n        \n        # Key bindings\n        declTree.bind('<Double-1>', lambda _: edit(declTree))\n\n    def LoadSaveFrame(self):\n        '''\n        Creates buttons to help load or confirm existing settings.\n        '''\n        master = self.losaFrame\n        loadButton = ttk.Button(master, text = 'Load Settings', command = self.loadSettings)\n        saveCheck = ttk.Checkbutton(master, text = 'Remember settings', variable = self.saveOrNot, onvalue = 1, offvalue = 0)\n        commands = [self.setUp, self.settings.destroy]\n        confButton = ttk.Button(master, text = 'Confirm', style = 'AccentButton', command = self.commit(commands))\n        \n        # Positioning\n        loadButton.grid(row = 0, column = 0, padx = 20, pady = 10, sticky = (N, E, W, S))\n        saveCheck.grid(row = 0, column = 1, padx = 20, pady = 10, sticky = (N, E, W, S))\n        confButton.grid(row = 2, column = 0, padx = 20, pady = 10, columnspan = 2, sticky = (N, E, W, S))\n    \n    def bindCanvas(self):\n        '''\n        Binds the drawing canvas with the required key bindings.\n        '''\n        try: # ensures the existence of a canvas to be bound\n            to_bind = self.canvCanvas\n            draw = self.drawDict[self.currentMode.get()]\n            to_bind.bind('<Button-1>', draw)\n            if self.currentMode.get() == 'Bounding Box':\n                to_bind.bind('<Motion>', draw)\n                to_bind.bind('<Control-1>', draw)\n            elif self.currentMode.get() == 'Landmark':\n                to_bind.bind('<Button-3>', draw)\n            self.root.bind_all('<Control-z>', self.undo)\n        except: pass\n    \n    def buildGraph(self, node, placeholder, tree):\n        '''\n        Builds a relational graph from the tree.\n        Recursively called, but the graph is LINEAR.\n        '''\n        try: index = str(int(node.replace('I',''), 16) - 1) # checks if the node passed is root or not\n        except: index = 'root'\n        literal_classList = self.getValues(self.classList)\n        children = tree.get_children(node) # gets the children of the node\n        if children != (): # if there are any children\n            placeholder[index] = [] # gives the node's family a place in the graph\n            for child in children: # for each child of the node\n                placeholder[index].append(literal_classList.index(tree.item(child)['text'])) # each goes into the node's family\n                self.buildGraph(child, placeholder, tree) # then goes to find their children in turn\n\n    def clearFrame(self, frame):\n        '''\n        Clears the frame by destroying its children.\n        '''\n        for wid in frame.winfo_children():\n            wid.destroy()\n    \n    def clearLog(self, pointer):\n        '''\n        Clears the log (pointer) by deleting every line.\n        '''\n        pointer.configure(state = 'normal')\n        pointer.delete('1.0', END)\n        pointer.configure(state = 'disabled')\n\n    def commit(self, commands):\n        '''\n        Returns a callable that runs each function pointer in the commands list.\n        '''\n        return lambda: [func() for func in commands]\n    \n    def chooseDirectory(self):\n        '''\n        Lets the user choose a directory to work in.\n        '''\n        self.directoryPath.set(filedialog.askdirectory())\n        self.fileIndex = -1\n        self.sldiLabel.config(text = self.directoryPath.get().split('/')[-1])\n    \n    def currentFile(self):\n        '''\n        Gets the current file.\n        '''\n        try: return self.directoryList()[self.fileIndex] # ensures to avoid IndexError\n        except: return None\n\n    def directoryList(self):\n        '''\n        Gets the directory as a list.\n        '''\n        # ensures a correct directory path.\n        try: return [filename for filename in os.listdir(self.directoryPath.get()) if self.isImage(filename)]\n        except FileNotFoundError: return None\n
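    \n    # Annotation rows (see submit() below) have the form\n    # [label indicator..., centerx, centery, width, height] with the geometry in\n    # [0, 1]; e.g. a Classic-scheme box for label 0 centred on the image could be\n    # [0, 0.5, 0.5, 0.25, 0.25]. Landmark rows carry [x, y] instead.\n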
    \n    def drawBB(self, event):\n        '''\n        Instantiates the Box class and draws bounding boxes\n        in accordance with mouse events.\n        + Left click to create a raw box. +\n        + Hover to adjust. +\n        + Left click again to finalise the box. +\n        + Press Ctrl while hovering to force the shape square. +\n        '''\n        try: # ensures a label is selected\n            color = self.colorDict[self.currentClass.get()]\n            self.stuffLog # accessing it raises AttributeError when no image is loaded yet\n        except: return\n        master = self.canvCanvas\n        # retrieves extremes\n        min_w, min_h, max_w, max_h = master.bbox('image')\n        # ensures the click event happens only on the image\n        if event.x < min_w or event.x > max_w: return\n        if event.y < min_h or event.y > max_h: return\n        if 'ButtonPress' in str(event):\n            if self.clicked: # second click to finalise the box\n                self.clicked = False\n                self.submit()\n            else: # first click to create a raw box\n                self.clicked = True\n                self.stuffLog.append(self.Box(master, \n                                              event.x, event.y, \n                                              event.x+1, event.y+1, \n                                              self.currentLabel['text'], color))\n        elif 'Motion' in str(event) and self.clicked: # hover to adjust the size\n            square = 'Control' in str(event) # press Ctrl to force-draw a square\n            self.stuffLog[-1].update(event.x, event.y, square)\n    \n    def drawLM(self, event):\n        '''\n        Instantiates the Point class and draws points\n        in accordance with mouse clicks.\n        + Left click to mark a point. +\n        + Right click to cycle to the next label. +\n        '''\n        try: # ensures a label is selected\n            color = self.colorDict[self.currentClass.get()]\n            self.stuffLog # accessing it raises AttributeError when no image is loaded yet\n        except: return\n        master = self.canvCanvas\n        # retrieves extremes\n        min_w, min_h, max_w, max_h = master.bbox('image')\n        # ensures the click event happens only on the image\n        if event.x < min_w or event.x > max_w: return\n        if event.y < min_h or event.y > max_h: return\n        if 'ButtonPress' in str(event):\n            if event.num == 1: # left click to create a point\n                self.stuffLog.append(self.Point(master, event.x, event.y, self.currentLabel['text'], color))\n                self.submit()\n            if event.num == 3 or self.currentScheme.get() == 'Classic': # right click (or the Classic scheme) switches to the next label automatically\n                classId = self.getValues(self.classList).index(self.currentClass.get())\n                self.currentClass.set(self.data['labels'][(classId+1)%self.numClasses])\n                self.currentLabel.configure(text = self.currentClass.get())\n                self.colorPatch.configure(background = self.colorDict[self.currentClass.get()])\n    \n    def drawSS(self, event):\n        pass\n    \n    def drawPS(self, event):\n        pass\n    \n    def getValues(self, list_):\n        '''\n        Returns a value-only list from a list of tk variables.\n        '''\n        return [item.get() for item in list_]\n\n    def giveColors(self):\n        '''\n        Assigns hex color codes to each of the unique labels.\n        (If there are 3 unique labels, the 1st label gets a hue of 0 degrees,\n        the 2nd a hue of 120 degrees, and the 3rd a hue of 240 degrees.\n        The hue space is then mapped to the hex space.)\n        '''\n        one_part = 360//self.numClasses\n        degrees = [one_part*i for i in range(self.numClasses)]\n        for clas, deg in zip(self.getValues(self.classList), degrees):\n            self.colorDict[clas] = self.hue2hex(deg)\n    \n    def hue2hex(self, degree):\n        '''\n        Converts hue degrees into hex codes.\n        '''\n        rgb = colorsys.hsv_to_rgb(degree/360, 1, 1)\n        r, g, b = int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255)\n        return f'#{r:02x}{g:02x}{b:02x}'\n
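    \n    # Worked example: with numClasses == 3, giveColors() assigns hues 0, 120\n    # and 240 degrees, i.e. hue2hex yields '#ff0000', '#00ff00' and '#0000ff'.\n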
    \n    def isImage(self, filename):\n        '''\n        Checks if a file is a valid image.\n        '''\n        extension = filename.split('.')[-1]\n        extensions = ['jpg', 'jpeg', 'png']\n        return extension in extensions\n    \n    def loadSettings(self):\n        '''\n        Loads control variables from a valid file containing past settings.\n        '''\n        settingsPath = filedialog.askopenfilename(filetypes = [('JSON', '.json')])\n        try: # ensures the settings get loaded correctly\n            settingsDict = json.load(open(settingsPath)) # json to dict\n            self.currentMode.set(settingsDict['mode'])\n            self.currentScheme.set(settingsDict['scheme'])\n            self.numClasses = settingsDict['num-labels']\n            self.relationGraph = settingsDict['relation-graph']\n            self.classList = [StringVar() for _ in range(self.numClasses)]\n            for i in range(self.numClasses):\n                self.classList[i].set(settingsDict['labels'][i])\n        except: # if not, show a message\n            a = Toplevel(self.settings, background = self.bg[self.currentTheme])\n            ttk.Label(a, text = 'Couldn\\'t load.', font = (None, 20)).grid(row = 0, column = 0, padx = 20, pady = 20)\n    \n    def nextFile(self):\n        '''\n        Increments fileIndex so that self.currentFile can fetch the next file.\n        '''\n        if self.currentFile(): # ensures a directory is selected\n            self.fileIndex += 1\n            self.slfiLabel.config(text = self.currentFile())\n    \n    def prevFile(self):\n        '''\n        Decrements fileIndex so that self.currentFile can fetch the previous file.\n        '''\n        if self.currentFile(): # ensures a directory is selected\n            self.fileIndex -= 1\n            self.slfiLabel.config(text = self.currentFile())\n    \n    def save(self):\n        '''\n        Saves the worked data to a file.\n        Saved file names are\n        'data@(dd)-(mm)-(yyyy)_(hh)-(mm)-(ss).json'\n        by default.\n        '''\n        try: # ensures the data gets saved correctly\n            string = json.dumps(self.data, sort_keys=True, indent = 4)\n            f = open(self.directoryPath.get()+'/data{}.json'.format(time.strftime('@%d-%m-%Y_%H-%M-%S')), 'w')\n            f.write(string)\n            f.close()\n        except: # if not, show a message\n            a = Toplevel(self.root, background = self.bg[self.currentTheme])\n            ttk.Label(a, text = 'Couldn\\'t save.', font = (None, 20)).grid(row = 0, column = 0, padx = 20, pady = 20)\n    \n    def saveSettings(self):\n        '''\n        Saves the current settings to a file.\n        Saved file names are\n        'settings@(dd)-(mm)-(yyyy)_(hh)-(mm)-(ss).json'\n        by default.\n        '''\n        try: # ensures the settings get saved correctly\n            settings = {'mode': self.currentMode.get(),\n                        'scheme': self.currentScheme.get(),\n                        'num-labels': self.numClasses,\n                        'labels': self.getValues(self.classList),\n                        'relation-graph': self.relationGraph} # constructs the settings dict\n            string = json.dumps(settings, indent = 4)\n            f = open(self.directoryPath.get()+'/settings{}.json'.format(time.strftime('@%d-%m-%Y_%H-%M-%S')), 'w')\n            f.write(string)\n            f.close()\n        except: # if not, show a message\n            a = Toplevel(self.settings, background = self.bg[self.currentTheme])\n            ttk.Label(a, text = 'Couldn\\'t save.', font = (None, 20)).grid(row = 0, column = 0, padx = 20, pady = 20)\n    \n    def setUp(self):\n        '''\n        Sets up control variables and performs actions \n        according to the settings.\n        '''\n        self.giveColors()\n        self.data['annotation-type'] = self.currentMode.get()\n        self.data['annotation-scheme'] = self.currentScheme.get()\n        self.data['labels'] = self.getValues(self.classList)\n        self.data['relation-graph'] = self.relationGraph\n        self.DetailFrame()\n        try: self.bindCanvas()\n        except: pass\n        if self.saveOrNot.get():\n            self.saveSettings()\n    \n    def setUpClassList(self):\n        '''\n        Updates the classList depending on numClasses.\n        '''\n        for i in range(self.numClasses):\n            try: # if classList has this member but it is not initiated\n                if self.classList[i].get() == '':\n                    self.classList[i].set('unnamed-'+str(i))\n            except: # if the desired member doesn't exist (i.e. len(classList) < numClasses, which raises IndexError in the block above)\n                self.classList.append(StringVar())\n                self.classList[i].set('unnamed-'+str(i))\n
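    \n    # For reference, a saved settings file (illustrative values) looks like:\n    # {\n    #     \"mode\": \"Bounding Box\",\n    #     \"scheme\": \"Classic\",\n    #     \"num-labels\": 2,\n    #     \"labels\": [\"cat\", \"dog\"],\n    #     \"relation-graph\": {}\n    # }\n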
    \n    def submit(self):\n        '''\n        Submits the information of drawn objects when the user finalises,\n        i.e. label indicator, x centre, y centre, width, height\n        '''\n        # ---- Local helper functions down here -----\n        def prefix():\n            '''\n            The label indicator must be a readable string when shown in the log,\n            and a numeric array/list when actually used. The function returns \n            'numeric' and 'string' versions of the label indicator.\n            '''\n            pf = dict()\n            if self.currentScheme.get() == 'Classic': # just fetching the index from classList is enough in Classic\n                pf['numeric'] = [self.data['labels'].index(self.currentClass.get())]\n            else: # multiple labels could be used in non-Classic\n                pf['numeric'] = self.getValues(self.labelarr)\n            pf['string'] = [self.currentLabel['text']]\n            return pf\n        # ---- Local helper functions end here -----\n        \n        prefixes = prefix() # retrieves the label indicator\n        to_put = prefixes['numeric'] + self.stuffLog[-1].info() # for actual use\n        self.data['annotations'][self.currentFile()].append(to_put)\n        to_put = prefixes['string'] + self.stuffLog[-1].info() # for the visual in the log\n        self.updateLog(str(to_put), self.logBox, 'a')\n    \n    def switchMode(self):\n        '''\n        Switches between the light and dark themes.\n        '''\n        # ----- Local helper functions down here -----\n        def changeBgFg(root):\n            '''\n            Changes the bg and fg of widgets where possible.\n            '''\n            for _, wid in root.children.items():\n                tw = type(wid)\n                if tw in [Toplevel, ttk.Frame, ttk.LabelFrame]:\n                    try:\n                        wid.configure(background = self.bg[self.currentTheme])\n                        wid.configure(foreground = self.fg[self.currentTheme])\n                    except: pass\n                    finally: changeBgFg(wid)\n                elif tw in [Text, Canvas, ttk.Combobox]:\n                    try: \n                        wid.configure(background = self.bg[self.currentTheme])\n                        wid.configure(foreground = self.fg[self.currentTheme])\n                        wid.configure(highlightthickness = 0)\n                    except: pass\n        # ----- Local helper functions end here -----\n        \n        if self.currentTheme == 'light':\n            self.currentTheme = 'dark'\n        elif self.currentTheme == 'dark':\n            self.currentTheme = 'light'\n        self.style.theme_use(self.styleDict[self.currentTheme])\n        changeBgFg(self.root)\n    \n    def undo(self, event):\n        '''\n        Undoes an unwanted action done on the drawing area.\n        i.e. 
removes last drawn object.\n '''\n try: # ensures at least one object is drawn\n self.stuffLog[-1].destroy() # deletes the object leaving a placeholder in stuffLog\n self.updateLog(str(self.data['annotations'][self.currentFile()][-1]), self.logBox, 'r') # removes its message line from log\n self.stuffLog.pop() # removes its placeholder from collection of objects for current file\n self.data['annotations'][self.currentFile()].pop() # removes from data record\n except: pass\n \n def updateLog(self, string, pointer, mode):\n '''\n Updates content of 'pointer' with 'string'.\n modes: \n - a : add\n - r : remove\n '''\n pointer.configure(state = 'normal')\n if mode == 'a':\n pointer.insert('1.0', 'added : '+string+'\\n')\n if mode == 'r':\n pointer.delete('1.0', '2.0')\n pointer.configure(state = 'disabled')\n\nroot = Tk()\napp = Anota(root)\nroot.mainloop()\n", "repo_name": "albertdoes/that-datum", "sub_path": "Anota.py", "file_name": "Anota.py", "file_ext": "py", "file_size_in_byte": 47358, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "tkinter.ttk.Style", "line_number": 202, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 202, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 256, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 256, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 258, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 258, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 259, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 259, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 260, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 260, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 261, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 261, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 262, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 262, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 284, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 284, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 306, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 306, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 313, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 313, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 323, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 323, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 340, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 340, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 341, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 341, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 342, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 342, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 343, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 343, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 344, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 344, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", 
"line_number": 346, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 346, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 348, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 348, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 438, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 438, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 439, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 439, "usage_type": "name"}, {"api_name": "tkinter.ttk.Radiobutton", "line_number": 445, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 445, "usage_type": "name"}, {"api_name": "tkinter.ttk.Checkbutton", "line_number": 451, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 451, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 468, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 468, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 469, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 469, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 490, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 490, "usage_type": "name"}, {"api_name": "tkinter.ttk.Checkbutton", "line_number": 491, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 491, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 492, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 492, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 509, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 509, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 510, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 510, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 511, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 511, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 529, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 529, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 530, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 530, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 531, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 531, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 532, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 532, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 533, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 533, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 608, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 608, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 612, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 612, "usage_type": "name"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 632, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 632, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 640, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 640, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 648, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 648, 
"usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 649, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 649, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 651, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 651, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 654, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 654, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 655, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 655, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 678, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 678, "usage_type": "name"}, {"api_name": "tkinter.ttk.Checkbutton", "line_number": 679, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 679, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 681, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 681, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 744, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 744, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 760, "usage_type": "call"}, {"api_name": "colorsys.hsv_to_rgb", "line_number": 850, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 866, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 866, "usage_type": "name"}, {"api_name": "json.load", "line_number": 868, "usage_type": "call"}, {"api_name": "tkinter.ttk.Label", "line_number": 878, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 878, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 904, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 905, "usage_type": "call"}, {"api_name": "tkinter.ttk.Label", "line_number": 910, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 910, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 925, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 926, "usage_type": "call"}, {"api_name": "tkinter.ttk.Label", "line_number": 931, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 931, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 999, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 999, "usage_type": "name"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 999, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 1005, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 1005, "usage_type": "name"}]} +{"seq_id": "13577557002", "text": "# NOTE: This is all only regarding the implementation, the why and how are explained in the paper referenced\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom math import log2\r\n\r\n#Factors below indicate the output size required, ie 512 for the first 4, then 256, then 128 then 64 etc.\r\nfactors = [1, 1, 1, 1, 1/2, 1/4, 1/8, 1/16, 1/32]\r\n\r\n#Equalized learning rate for the conv2d\r\nclass EQConv2D(nn.Module):\r\n #Gain is for the initialization constant of 2\r\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, gain=2):\r\n super().__init__()\r\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)\r\n self.scale = (gain/(kernel_size**2) * 
(1/in_channels)) ** 0.5 # equalized LR: scale = sqrt(gain / fan_in) with fan_in = kernel_size**2 * in_channels\r\n        #Bias should not be scaled with it, so it should be declared here as well\r\n        self.bias = self.conv.bias\r\n        self.conv.bias = None\r\n        \r\n        #initialize the conv layer\r\n        nn.init.normal_(self.conv.weight)\r\n        nn.init.zeros_(self.bias)\r\n        \r\n    #For each forward pass, feed through the conv and reshape the bias for self.conv\r\n    def forward(self, x):\r\n        return self.conv(x*self.scale) + self.bias.view(1, self.bias.shape[0], 1, 1)\r\n\r\n#For the vector normalization in the generator\r\nclass PixelNorm(nn.Module):\r\n    def __init__(self):\r\n        super(PixelNorm, self).__init__()\r\n        self.epsilon = 1e-8\r\n\r\n    def forward(self, x):\r\n        #torch.mean of every pixel value squared, across the channels (which is dim 1 because dim 0 is just all the examples),\r\n        # then keepdim=True so the elementwise division works, finally add epsilon to all of this\r\n        return x/torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + self.epsilon)\r\n\r\n#Conv block will be 2 3x3's\r\nclass ConvBlock(nn.Module):\r\n    def __init__(self, in_channels, out_channels, use_pixelnorm=True):\r\n        super(ConvBlock, self).__init__()\r\n        self.use_pn = use_pixelnorm\r\n        self.conv1 = EQConv2D(in_channels, out_channels)\r\n        self.conv2 = EQConv2D(out_channels, out_channels)\r\n        self.LRelu = nn.LeakyReLU(0.2)\r\n        self.pn = PixelNorm()\r\n\r\n    def forward(self, x):\r\n        x = self.LRelu(self.conv1(x))\r\n        x = self.pn(x) if self.use_pn else x\r\n        x = self.LRelu(self.conv2(x))\r\n        x = self.pn(x) if self.use_pn else x\r\n        return x\r\n\r\n#The generator will use pixelnorm as well, the discriminator will not\r\nclass Generator(nn.Module):\r\n    def __init__(self, z_dim, in_channels, img_channels=3):\r\n        super(Generator, self).__init__()\r\n        self.initial = nn.Sequential(\r\n            #Plain ConvTranspose2d without equalized weighting here, even though the paper says to use it everywhere\r\n            PixelNorm(),\r\n            nn.ConvTranspose2d(z_dim, in_channels, 4, 1, 0), # 1x1 -> 4x4\r\n            nn.LeakyReLU(0.2),\r\n            EQConv2D(in_channels, in_channels, kernel_size=3, stride=1, padding=1),\r\n            nn.LeakyReLU(0.2),\r\n            PixelNorm()\r\n        )\r\n\r\n        self.initial_rgb = EQConv2D(in_channels, img_channels, kernel_size=1, stride=1, padding=0)\r\n\r\n        self.prog_blocks, self.rgb_layers = nn.ModuleList(), nn.ModuleList([self.initial_rgb])\r\n\r\n        for i in range(len(factors) -1):\r\n            conv_in_c = int(in_channels * factors[i])\r\n            conv_out_c = int(in_channels * factors[i+1])\r\n            self.prog_blocks.append(ConvBlock(conv_in_c, conv_out_c))\r\n            self.rgb_layers.append(EQConv2D(conv_out_c, img_channels, kernel_size=1, stride=1, padding=0))\r\n\r\n    def fade_in(self, alpha, upscale, generated):\r\n        #We want the output to be between -1 and 1, therefore tanh\r\n        return torch.tanh(alpha * generated + (1 - alpha) * upscale)\r\n\r\n    def forward(self, x, alpha, steps): #steps == 0 -> 4x4, steps == 1 -> 8x8, etc.\r\n        out = self.initial(x) #4x4\r\n        if steps == 0:\r\n            return self.initial_rgb(out)\r\n        \r\n        for step in range(steps):\r\n            #upsample before running through the prog blocks\r\n            upscale = F.interpolate(out, scale_factor=2, mode=\"nearest\")\r\n            out = self.prog_blocks[step](upscale)\r\n\r\n        final_upscale = self.rgb_layers[steps-1](upscale)\r\n        final_out = self.rgb_layers[steps](out)\r\n\r\n        return self.fade_in(alpha, final_upscale, final_out)\r\n\r\n
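\r\n# Illustrative helper (not in the original file): the conv channel count at a\r\n# given progression step, e.g. _channels_at(256, 0) == 256, _channels_at(256, 4) == 128\r\n# and _channels_at(256, 8) == 8 - matching the factors list at the top.\r\ndef _channels_at(in_channels, step):\r\n    return int(in_channels * factors[step])\r\n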
\r\nclass Discriminator(nn.Module):\r\n    def __init__(self, in_channels, img_channels=3):\r\n        super(Discriminator, self).__init__()\r\n        self.prog_blocks, self.rgb_layers = nn.ModuleList(), nn.ModuleList()\r\n        self.Lrelu = nn.LeakyReLU(0.2)\r\n\r\n        for i in range(len(factors)-1, 0, -1):\r\n            conv_in_c = int(in_channels * factors[i])\r\n            conv_out_c = int(in_channels * factors[i-1])\r\n            self.prog_blocks.append(ConvBlock(conv_in_c, conv_out_c, use_pixelnorm=False))\r\n            self.rgb_layers.append(EQConv2D(img_channels, conv_in_c, kernel_size=1, stride=1, padding=0))\r\n\r\n        #This is for the 4x4 resolution at the end, named the same as in the generator even though it is used at the end\r\n        self.initial_rgb = EQConv2D(img_channels, in_channels, kernel_size=1, stride=1, padding=0)\r\n        self.rgb_layers.append(self.initial_rgb)\r\n        self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)\r\n\r\n        #Block for the 4x4\r\n        self.final_block = nn.Sequential(\r\n            EQConv2D(in_channels+1, in_channels, kernel_size=3, stride=1, padding=1),\r\n            nn.LeakyReLU(0.2),\r\n            EQConv2D(in_channels, in_channels, kernel_size=4, stride=1, padding=0),\r\n            nn.LeakyReLU(0.2),\r\n            EQConv2D(in_channels, 1, kernel_size=1, stride=1, padding=0)\r\n        )\r\n\r\n    def fade_in(self, alpha, downscaled, out):\r\n        # Used to fade in the downscaled input (avg pooling) with the output from the CNN\r\n        return alpha * out + (1 - alpha) * downscaled\r\n\r\n    def minibatch_std(self, x):\r\n        #Take the std over the batch (across all channels), then repeat it as a single channel and concat it with the image\r\n        #Allows the discriminator to learn variance in the batch/img\r\n        batch_stats = torch.std(x, dim=0).mean().repeat(x.shape[0], 1, x.shape[2], x.shape[3])\r\n        return torch.cat([x, batch_stats], dim=1)\r\n    \r\n    def forward(self, x, alpha, steps):\r\n        #Have to index in reverse order for the discriminator while keeping the property of step 0 = 4x4, step 1 = 8x8 etc.\r\n        cur_steps = len(self.prog_blocks) - steps\r\n        out = self.Lrelu(self.rgb_layers[cur_steps](x))\r\n\r\n        if steps == 0:\r\n            out = self.minibatch_std(out)\r\n            return self.final_block(out).view(out.shape[0], -1)\r\n\r\n        #Because the prog blocks also downscale, we have to use the rgb layer of the previous/smaller size, which is why we do +1\r\n        downscaled = self.Lrelu(self.rgb_layers[cur_steps + 1](self.avg_pool(x)))\r\n        out = self.avg_pool(self.prog_blocks[cur_steps](out))\r\n\r\n        #Done first between the downscaled and the input\r\n        out = self.fade_in(alpha, downscaled, out)\r\n\r\n        for step in range(cur_steps+1, len(self.prog_blocks)):\r\n            out = self.prog_blocks[step](out)\r\n            out = self.avg_pool(out)\r\n\r\n        out = self.minibatch_std(out)\r\n        return self.final_block(out).view(out.shape[0], -1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    Z_DIM = 100\r\n    IN_CHANNELS = 256\r\n    gen = Generator(Z_DIM, IN_CHANNELS, img_channels=3)\r\n    critic = Discriminator(IN_CHANNELS, img_channels=3)\r\n\r\n    for img_size in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:\r\n        num_steps = int(log2(img_size / 4))\r\n        x = torch.randn((1, Z_DIM, 1, 1))\r\n        z = gen(x, 0.5, steps=num_steps)\r\n        assert z.shape == (1, 3, img_size, img_size)\r\n        out = critic(z, alpha=0.5, steps=num_steps)\r\n        assert out.shape == (1, 1)\r\n        print(f\"Success! 
At img size: {img_size}\")", "repo_name": "AjaybirRandhawa/Face-Generator", "sub_path": "Pro-Gan/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 7807, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.init.zeros_", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.sqrt", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.LeakyRelu", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.std", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 137, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "72645454069", "text": "\"\"\"Tests basic git operations: commit, log, config\"\"\"\n# pylint: disable=redefined-outer-name\n\nfrom . import helper\nfrom .helper import app_context\n\n\n# These assertions make pylint happy. It considers them unused imports otherwise.\nassert app_context is not None\n\n\ndef test_git_commit(app_context):\n \"\"\"Test running 'git commit' via cola.git\"\"\"\n helper.write_file('A', 'A')\n helper.write_file('B', 'B')\n helper.run_git('add', 'A', 'B')\n\n app_context.git.commit(m='initial commit')\n log = helper.run_git('-c', 'log.showsignature=false', 'log', '--pretty=oneline')\n\n expect = 1\n actual = len(log.splitlines())\n assert expect == actual\n\n\ndef test_git_config(app_context):\n \"\"\"Test cola.git.config()\"\"\"\n helper.run_git('config', 'section.key', 'value')\n expect = (0, 'value', '')\n actual = app_context.git.config('section.key', get=True)\n assert expect == actual\n", "repo_name": "git-cola/git-cola", "sub_path": "test/gitops_test.py", "file_name": "gitops_test.py", "file_ext": "py", "file_size_in_byte": 900, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2129, "dataset": "github-code", "pt": "94", "api": [{"api_name": "helper.app_context", "line_number": 9, "usage_type": "name"}, {"api_name": "helper.write_file", "line_number": 14, "usage_type": "call"}, {"api_name": "helper.write_file", "line_number": 15, "usage_type": "call"}, {"api_name": "helper.run_git", "line_number": 16, "usage_type": "call"}, {"api_name": "helper.app_context.git.commit", "line_number": 18, "usage_type": "call"}, {"api_name": "helper.app_context.git", "line_number": 18, "usage_type": "attribute"}, {"api_name": "helper.app_context", "line_number": 18, "usage_type": "name"}, {"api_name": "helper.run_git", "line_number": 19, "usage_type": "call"}, {"api_name": "helper.run_git", "line_number": 28, "usage_type": "call"}, {"api_name": "helper.app_context.git.config", "line_number": 30, "usage_type": "call"}, {"api_name": "helper.app_context.git", "line_number": 30, "usage_type": "attribute"}, {"api_name": "helper.app_context", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "15191715657", "text": "import os\nimport zipfile\nfrom os.path import basename, join, abspath\n\n\ndef get_target_filename(source_zip_filepath):\n # Extra information to be provided during processing.\n source_filename = basename(source_zip_filepath)\n source_filename_parts = source_filename.split('.')\n source_filename_parts[0] += '_neut'\n return '.'.join(source_filename_parts)\n\n\ndef archive_all_files_in_zip(to_zip_filepath, source_dir):\n with zipfile.ZipFile(to_zip_filepath, \"w\") as target_zip:\n for root, _, files in os.walk(source_dir):\n for filename in files:\n print(\"Archiving: '{}'\".format(filename))\n target_zip.write(abspath(join(root, filename)), arcname=filename)\n print(\"Saving: {}\".format(to_zip_filepath))\n", "repo_name": "nicolay-r/frame-based-attitude-extraction-workflow", "sub_path": "neutral/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 762, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.path.basename", "line_number": 8, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 15, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "70560537271", "text": "# -*- coding: utf-8 -*-\n# @Author: jsgounot\n# @Date: 2019-04-03 16:04:11\n# @Last modified by: jsgounot\n# @Last Modified time: 2019-04-08 09:56:37\n\nimport os, glob, shutil\nimport time, datetime\n\nfrom itertools import chain\nfrom threading import Thread\n\nimport pandas as pd\n\nfrom Bio import SeqIO\nfrom Bio.Blast import NCBIWWW, NCBIXML\n\nfrom pyblast import BCLine, BCLineHSPFuse\nfrom pyblast import utils\n\n\"\"\"\n\nOnline blast query\n\n\"\"\"\n\ncurrent_time = lambda : datetime.datetime.now().strftime(\"%H:%M:%S\")\n\nclass Query(object) :\n\n def __init__(self, qbr_instance, job_name, program, database, fasta_str, ** kwargs) :\n self.qbr_instance = qbr_instance\n self.job_name = job_name\n\n self.program = program\n self.database = database\n self.fasta_str = fasta_str\n self.kwargs = kwargs\n\n if kwargs.get(\"format_type\", \"XML\") != \"XML\" :\n raise ValueError(\"Only XML output are managed\")\n\n def trigger(self) :\n print (\"%s : Run qblast (%s - %s) - job name : %s\" %(current_time(), self.program, self.database, self.job_name))\n res = NCBIWWW.qblast(self.program, self.database, self.fasta_str, ** self.kwargs)\n print (\"%s : End qblast - job name : %s\" %(current_time(), self.job_name))\n\n xml_file = utils.TMPFname()\n with open(str(xml_file), \"w\") as f :\n f.write(res.read())\n\n res.close()\n self.qbr_instance.append(xml_file)\n\nclass QueriesManager(object) :\n\n \"\"\"\n\n Run multiple queries within threads\n\n \"\"\"\n\n def __init__(self, queries=[]) :\n self.queries = queries\n\n def __add__(self, other) :\n if not isinstance(other, QueriesManager) :\n raise ValueError(\"Queries Manager can only be associated with other QM\")\n\n new_queries = self.queries + other.queries\n return QueriesManager(new_queries)\n\n def run(self, time_wait=15) :\n # Run parallel qblast queries over ncbi using threads\n # https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=DeveloperInfo\n # time wait corresponds to the time beetween two parallele requests, if you don't want that\n # ncbi kicks your ass, do not lower this value\n\n threads = []\n fun = lambda query : query.trigger()\n\n for query in self.queries :\n thread = Thread(target=fun, args=(query,))\n thread.start()\n threads.append(thread)\n time.sleep(time_wait)\n\n for thread in threads :\n thread.join()\n\n # --------------------------------------------------\n # Create the Queries Manager from data (fasta)\n # --------------------------------------------------\n\n @staticmethod\n def records_to_str(records) :\n strs = []\n \n for record in records :\n identifier, seq = record.id, str(record.seq)\n strs.append(\">%s\" %(identifier))\n\n for i in range(0, len(seq), 60) :\n strs.append(seq[i:i+60])\n\n return \"\\n\".join(strs)\n\n @staticmethod\n def extract_fasta_str(fasta, chunk_size=10) :\n # return the fasta sequence as a\n # string object\n\n fdata = list(SeqIO.parse(fasta, \"fasta\"))\n\n if chunk_size is None :\n return [QueriesManager.records_to_str(fdata)]\n\n return 
[QueriesManager.records_to_str(records)\n for records in BCLine.grouper_fdata(fdata, chunk_size)]\n\n @staticmethod\n def from_fasta(program, database, fasta, chunk_size=None, ** kwargs) :\n # Return the QBlastResult and the Queries manager based on qblast parameter and fasta chunk_size\n\n qbr = QBlastResult(fasta=fasta)\n queries = []\n subsequences = list(QueriesManager.extract_fasta_str(fasta, chunk_size))\n\n for idx, subsequence in enumerate(subsequences, start=1) :\n job_name = \"%s (n = %i)\" %(fasta, idx) if len(subsequences) > 1 else fasta\n queries.append(Query(qbr, job_name, program, database, subsequence, ** kwargs))\n\n return qbr, QueriesManager(queries)\n\n @staticmethod\n def from_multiple_fastas(program, database, fasta_list, chunk_size=None, ** kwargs) :\n qbrs, qms = zip(* [QueriesManager.from_fasta(program, database, fasta, chunk_size, ** kwargs)\n for fasta in fasta_list])\n\n qm = sum(qms, QueriesManager())\n qbrs = qbrs if len(qbrs) > 1 else next(iter(qbrs))\n\n return qbrs, qm\n\nclass QBlastResult(object):\n\n \"\"\"\n\n Wrapper of NCBIWWW Qblast function \n http://biopython.org/DIST/docs/api/Bio.Blast.NCBIWWW-module.html\n\n \"\"\"\n\n def __init__(self, xml_files=None, fasta=None) :\n\n self.fasta = fasta\n\n if xml_files is not None :\n xml_files = glob.glob(xml_files) if isinstance(xml_files, str) else xml_files\n self.xml_files = xml_files\n\n else :\n self.xml_files = []\n\n def append(self, xml_file) :\n self.xml_files.append(xml_file)\n\n @staticmethod\n def fuse_xml(xml_files) :\n raise NotImplementedError(\"XML fusion is not implemented yet\")\n\n def save(self, outfile) :\n if len(self.xml_files) == 0 : raise ValueError(\"Empty QBlastResult instance\")\n xml_file = self.xml_files[0] if len(self.xml_files) == 1 else QBlastResult.fuse_xml(self.xml_files)\n shutil.copyfile(xml_file, outfile)\n\n @staticmethod\n def parse_xml_file(xml_file) :\n with open(str(xml_file)) as f :\n try : return [record for record in NCBIXML.parse(f)]\n except ValueError : return [] # empty results\n\n def parse_xml_files(self) :\n\n if not self.xml_files :\n raise ValueError(\"No xml files produced or provided\")\n\n return chain(* (QBlastResult.parse_xml_file(xml_file)\n for xml_file in self.xml_files))\n\n def as_table(self) :\n records = self.parse_xml_files()\n rows = []\n\n for query, subject, hsp in BCLineHSPFuse.iter_hsp(records) :\n row = {}\n\n row[\"qname\"], row[\"sname\"] = query[0], subject[0]\n row[\"qlen\"], row[\"slen\"] = query[1], subject[1]\n row[\"length\"], row[\"gap\"] = hsp.align_length, hsp.gaps\n row[\"hsp_ident\"], row[\"hsp_pos\"] = hsp.identities, hsp.positives\n row[\"expect\"] = hsp.expect\n\n rows.append(row)\n\n df = pd.DataFrame(rows)\n if df.empty : return df\n\n df[\"ppos\"] = df[\"hsp_pos\"] * 100 / df[\"length\"]\n df[\"pident\"] = df[\"hsp_ident\"] * 100 / df[\"length\"]\n\n df[\"qide_prc\"] = df[\"hsp_ident\"] * 100 / df[\"qlen\"]\n df[\"qpos_prc\"] = df[\"hsp_pos\"] * 100 / df[\"qlen\"]\n df[\"side_prc\"] = df[\"hsp_ident\"] * 100 / df[\"slen\"]\n df[\"spos_prc\"] = df[\"hsp_pos\"] * 100 / df[\"slen\"]\n\n return df\n\n def hsp_fuse(self) :\n records = self.parse_xml_files()\n return BCLineHSPFuse.get_sum_sim_all(records)\n\n\n# --------------------------------------------------\n# Called function\n# --------------------------------------------------\n\ndef launch_qblasts(program, database, fasta_list, chunk_size=None, time_wait=15, ** kwargs) :\n # Run parallel or single qblast queries over ncbi using threads\n # 
https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=DeveloperInfo\n # time wait corresponds to the time between two parallel requests, if you don't want\n # ncbi to kick your ass, do not lower this value\n\n fasta_list = glob.glob(fasta_list) if isinstance(fasta_list, str) else fasta_list\n qbr, qm = QueriesManager.from_multiple_fastas(program, database, fasta_list, chunk_size=chunk_size, ** kwargs)\n qm.run(time_wait)\n return qbr\n", "repo_name": "jsgounot/PyBlast", "sub_path": "pyblast/qblast.py", "file_name": "qblast.py", "file_ext": "py", "file_size_in_byte": 7595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "Bio.Blast.NCBIWWW.qblast", "line_number": 45, "usage_type": "call"}, {"api_name": "Bio.Blast.NCBIWWW", "line_number": 45, "usage_type": "name"}, {"api_name": "pyblast.utils.TMPFname", "line_number": 48, "usage_type": "call"}, {"api_name": "pyblast.utils", "line_number": 48, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 83, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 113, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 113, "usage_type": "name"}, {"api_name": "pyblast.BCLine.grouper_fdata", "line_number": 119, "usage_type": "call"}, {"api_name": "pyblast.BCLine", "line_number": 119, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 159, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 175, "usage_type": "call"}, {"api_name": "Bio.Blast.NCBIXML.parse", "line_number": 180, "usage_type": "call"}, {"api_name": "Bio.Blast.NCBIXML", "line_number": 180, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 188, "usage_type": "call"}, {"api_name": "pyblast.BCLineHSPFuse.iter_hsp", "line_number": 195, "usage_type": "call"}, {"api_name": "pyblast.BCLineHSPFuse", "line_number": 195, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 206, "usage_type": "call"}, {"api_name": "pyblast.BCLineHSPFuse.get_sum_sim_all", "line_number": 221, "usage_type": "call"}, {"api_name": "pyblast.BCLineHSPFuse", "line_number": 221, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 234, "usage_type": "call"}]} +{"seq_id": "2573877838", "text": "\nimport pandas as pd\nimport glob\nimport json\nfrom tqdm import tqdm\n\n\n# For datasets from AI Hub & the Korean government organization for the Korean language\ndef file_kor(file_atcl, file_labl):\n article = []\n tagged_id = []\n\n for idx, itm in enumerate(file_atcl['document']):\n for jtm in tqdm(itm):\n stnc_atcl = [ktm['form'] for ktm in jtm['paragraph']]\n article.append(stnc_atcl)\n\n for ktm in file_labl['data']:\n for ltm in ktm:\n if(ltm['document_id'] == jtm['id']):\n tagged_id.append([jtm['paragraph'].index(mtm[\"topic_sentences\"]) for mtm in ltm])\n break\n break\n return pd.DataFrame({'atcl': article, 'tgid': tagged_id})\n\ndef file_aihub(file):\n article = []\n tagged_id = []\n for itm in list(file['documents']):\n for jtm in itm:\n if(len(jtm['text']) < 2):\n article.append([ktm['sentence'] for ktm in jtm['text'][0]] )\n tagged_id.append(jtm['extractive'])\n else:\n if(isinstance(jtm['extractive'][0], list)):\n article = article[:len(tagged_id)]\n continue\n else:\n if(None in 
jtm['extractive']):\n article = article[:len(tagged_id)]\n continue\n article.append([ltm['sentence'] for ktm in jtm['text'] for ltm in ktm])\n tagged_id.append(jtm['extractive'])\n\n return pd.DataFrame({'atcl': article, 'tgid': tagged_id})\n\ndef file_conc(dir):\n file_list = glob.glob(dir.replace(\"/\", \"\\\\\") + '/*.json')\n\n df_list = []\n\n for itm in file_list:\n with open(itm, \"rb\") as f:\n df_list.append(pd.json_normalize(json.loads(f.read())))\n\n conc = pd.concat(df_list)\n\n return conc\n\n", "repo_name": "Kwon-GiHwan/BertSum-korean", "sub_path": "src/pre_process/data_parser.py", "file_name": "data_parser.py", "file_ext": "py", "file_size_in_byte": 1846, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "tqdm.tqdm", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.json_normalize", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "4940919206", "text": "import os\nimport logging\nfrom datetime import datetime\nfrom utils.logger import setlogger\n\n\ninclude_keys = ['max_epoch', 'crop_size', 'extra_aug', 'downsample_ratio', 'lr', 'lr_lbfgs', 'scheduler', 'cost', 'scale', 'reach',\n 'blur', 'scaling', 'tau', 'p', 'p_norm', 'norm_coord', 'phi', 'rho', 'd_point', 'd_pixel', 'batch_size']\n\ndef get_run_name_by_args(args, include_keys=None, exclude_keys=None):\n data = args.__dict__\n result = []\n if include_keys:\n for k in include_keys:\n result.append(f'{k}_{data[k]}')\n else:\n for k, v in data.items():\n if exclude_keys and k in exclude_keys:\n continue\n result.append(f'{k}_{v}')\n return '_'.join(result)\n\n\ndef rename_if_exist(path):\n base_path = path\n i = 1\n while os.path.exists(path):\n path, i = base_path + f'_({i})', i + 1  # advance the suffix so the loop terminates\n return path\n\n\nclass Trainer(object):\n def __init__(self, args):\n if os.path.exists(args.resume):\n self.save_dir = os.path.dirname(args.resume)\n else:\n self.save_dir = os.path.join(args.save_dir, get_run_name_by_args(args, include_keys) + '_' + datetime.strftime(datetime.now(), '%m%d-%H%M%S'))\n args.save_dir = self.save_dir\n os.makedirs(self.save_dir, exist_ok=True)\n setlogger(os.path.join(self.save_dir, 'train.log')) # set logger\n for k, v in args.__dict__.items(): # save args\n logging.info(\"{}: {}\".format(k, v))\n self.args = args\n\n def setup(self):\n \"\"\"initialize the datasets, model, loss and optimizer\"\"\"\n pass\n\n def train(self):\n \"\"\"training one epoch\"\"\"\n pass\n", "repo_name": "Nightmare4214/GeneralizedLoss-Counting-Pytorch-semi", "sub_path": "utils/trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 1661, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type":
"attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.logger.setlogger", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "30068023650", "text": "import numpy as np\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import f_classif\r\nimport preprocessNFL as pre\r\n\r\n# teamStatNames = ['Yds','Yds/G','PYds','RYds','PTS/G']\r\n# teamPassNames = ['PYds/A', 'Passer Rating']\r\n# teamReceiveNames = ['ReYds', 'ReYds/G']\r\n\r\ndfX = pre.cleanTrainingX()\r\ndfY = pre.cleanTrainingY(dfX)\r\ndfX.drop(['Winner', '@', 'Loser', 'PtsW', 'PtsL'], axis=1, inplace=True)\r\ndfUnplayed = pre.getUnplayedGames()\r\ndfPredict = pre.cleanPredictions()\r\n\r\nX = dfX.values\r\nY = dfY.values\r\n\r\n# feature extraction\r\ntest = SelectKBest(score_func=f_classif, k=4)\r\nfit = test.fit(X,Y)\r\n\r\n# summarize scores\r\nnp.set_printoptions(precision=3)\r\nprint(fit.scores_)\r\nfeatures = fit.transform(X)\r\nprint(features[0:5,:])\r\n\r\n# best 4 features are:\r\n# PTS/G\r\n# Passer Rating\r\n# Yds\r\n# Yds/G\r\n", "repo_name": "jpollmann102/sports-predictions", "sub_path": "featureSelection.py", "file_name": "featureSelection.py", "file_ext": "py", "file_size_in_byte": 829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "preprocessNFL.cleanTrainingX", "line_number": 10, "usage_type": "call"}, {"api_name": "preprocessNFL.cleanTrainingY", "line_number": 11, "usage_type": "call"}, {"api_name": "preprocessNFL.getUnplayedGames", "line_number": 13, "usage_type": "call"}, {"api_name": "preprocessNFL.cleanPredictions", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.f_classif", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.set_printoptions", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "12138377278", "text": "#!/usr/bin/env python3\n\nimport os\n\nimport zmq\n\ncontext = zmq.Context()\nsock_address = os.environ.get('SOCK_ADDRESS', 'localhost:5555')\nsocket = context.socket(zmq.SUB)\nsocket.connect(f'tcp://{sock_address}')\nsocket.setsockopt(zmq.SUBSCRIBE, b'timecube')\n\nwhile True:\n payload = socket.recv_string()\n topic, identifier, side = payload.split()\n print(f'topic={topic}, identifier={identifier}, side={side}')\n", "repo_name": "patrickbucher/inf-stud-hslu", "sub_path": "aiot/src/subscriber/subscribe.py", "file_name": "subscribe.py", "file_ext": "py", "file_size_in_byte": 414, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "94", "api": [{"api_name": "zmq.Context", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 8, "usage_type": 
"attribute"}, {"api_name": "zmq.SUB", "line_number": 9, "usage_type": "attribute"}, {"api_name": "zmq.SUBSCRIBE", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "36407958091", "text": "'''\nConvert geometry from internal coordinate to Cartesian coordinate\nOptionally, convert gradient as well\n\nUnit:\nLength and energy are the same to your input\nAngle is assumed to be in radius\n\nDefault file format:\ninternal coordinate definition: Fortran-Library default format\ngeometry: 3 numbers/line for internal coordinate, xyz for Cartesian coordinate\n output = input.xyz\ngradient: 3 numbers/line\n output = input.cartgrad\n'''\n\nimport argparse\nfrom pathlib import Path\nimport numpy\nimport FortranLibrary as FL\nimport basic.io\n\ndef parse_args() -> argparse.Namespace: # Command line input\n parser = argparse.ArgumentParser(__doc__)\n parser.add_argument('format', type=str, help='File format: Columbus7 or default')\n parser.add_argument('IntCoordDef', type=Path, help='internal coordinate definition file')\n parser.add_argument('geom', type=Path, help='geometry file')\n parser.add_argument('init', type=Path, help='initial Cartesian guess geometry file')\n parser.add_argument('-g','--grad', type=Path, help='gradient file')\n parser.add_argument('-o','--output', type=Path, help='geometry output file')\n parser.add_argument('-go','--gradoutput', type=Path, help='gradient output file')\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n ''' Initialize '''\n args = parse_args()\n if args.format == 'Columbus7':\n # Define internal coordinate\n intdim, _ = FL.DefineInternalCoordinate('Columbus7', file=args.IntCoordDef)\n # Read geometry\n q = basic.io.read_geom_int(args.geom,intdim)\n # Read initial Cartesian guess geometry\n NAtoms, symbol, number, r0, mass = basic.io.read_geom_Columbus7(args.init)\n # Read gradient\n if args.grad != None: intgrad = basic.io.read_grad_int(args.grad,intdim)\n else:\n # Define internal coordinate\n intdim, _ = FL.DefineInternalCoordinate(args.format, file=args.IntCoordDef)\n # Read geometry\n q = basic.io.read_geom_int(args.geom,intdim)\n # Read initial Cartesian guess geometry\n NAtoms, symbol, r0 = basic.io.read_geom_xyz(args.init)\n # Read gradient\n if args.grad != None: intgrad = basic.io.read_grad_int(args.grad,intdim)\n ''' Do the job '''\n cartdim = 3*NAtoms; r = numpy.empty(cartdim)\n if args.grad == None:\n FL.CartesianCoordinate(q, r, r0=r0)\n else:\n cartgrad = numpy.empty(cartdim)\n FL.Internal2Cartesian(q, intgrad, r, cartgrad, r0=r0)\n ''' Output '''\n if args.format == 'Columbus7':\n if args.output == None:\n basic.io.write_geom_Columbus7(Path('geom'), NAtoms, symbol, number, r, mass)\n else:\n basic.io.write_geom_Columbus7(args.output, NAtoms, symbol, number, r, mass)\n if args.grad != None:\n if args.gradoutput == None:\n basic.io.write_grad_cart(Path('cartgrd'), cartgrad)\n else:\n basic.io.write_grad_cart(args.gradoutput, cartgrad)\n else:\n if args.output == None:\n basic.io.write_geom_xyz(Path(str(args.geom)+'.xyz'), NAtoms, symbol, r)\n else:\n basic.io.write_geom_xyz(args.output, NAtoms, symbol, r)\n if args.grad != None:\n if args.gradoutput == None:\n basic.io.write_grad_cart(Path(str(args.grad)+'.cartgrad'), cartgrad)\n else:\n basic.io.write_grad_cart(args.gradoutput, cartgrad)", "repo_name": "YifanShenSZ/Tool-Collection", "sub_path": "python/int2cart.py", "file_name": "int2cart.py", "file_ext": "py", "file_size_in_byte": 3446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
"dataset": "github-code", "pt": "94", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 26, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 30, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 31, "usage_type": "name"}, {"api_name": "argparse.Namespace", "line_number": 23, "usage_type": "attribute"}, {"api_name": "FortranLibrary.DefineInternalCoordinate", "line_number": 40, "usage_type": "call"}, {"api_name": "basic.io.io.read_geom_int", "line_number": 42, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 42, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 42, "usage_type": "name"}, {"api_name": "basic.io.io.read_geom_Columbus7", "line_number": 44, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 44, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 44, "usage_type": "name"}, {"api_name": "basic.io.io.read_grad_int", "line_number": 46, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 46, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 46, "usage_type": "name"}, {"api_name": "FortranLibrary.DefineInternalCoordinate", "line_number": 49, "usage_type": "call"}, {"api_name": "basic.io.io.read_geom_int", "line_number": 51, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 51, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 51, "usage_type": "name"}, {"api_name": "basic.io.io.read_geom_xyz", "line_number": 53, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 53, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 53, "usage_type": "name"}, {"api_name": "basic.io.io.read_grad_int", "line_number": 55, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 55, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 57, "usage_type": "call"}, {"api_name": "FortranLibrary.CartesianCoordinate", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 61, "usage_type": "call"}, {"api_name": "FortranLibrary.Internal2Cartesian", "line_number": 62, "usage_type": "call"}, {"api_name": "basic.io.io.write_geom_Columbus7", "line_number": 66, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 66, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 66, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 66, "usage_type": "call"}, {"api_name": "basic.io.io.write_geom_Columbus7", "line_number": 68, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 68, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 68, "usage_type": "name"}, {"api_name": "basic.io.io.write_grad_cart", "line_number": 71, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 71, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 71, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 71, "usage_type": "call"}, {"api_name": "basic.io.io.write_grad_cart", "line_number": 73, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 73, "usage_type": "attribute"}, {"api_name": "basic.io", 
"line_number": 73, "usage_type": "name"}, {"api_name": "basic.io.io.write_geom_xyz", "line_number": 76, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 76, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 76, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "basic.io.io.write_geom_xyz", "line_number": 78, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 78, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 78, "usage_type": "name"}, {"api_name": "basic.io.io.write_grad_cart", "line_number": 81, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 81, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 81, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 81, "usage_type": "call"}, {"api_name": "basic.io.io.write_grad_cart", "line_number": 83, "usage_type": "call"}, {"api_name": "basic.io.io", "line_number": 83, "usage_type": "attribute"}, {"api_name": "basic.io", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "10014221597", "text": "from tkinter import *\r\nimport sqlite3\r\nconnection = sqlite3.connect(\"Event_Management.db\")\r\ncursor= connection.cursor()\r\ndef destructTotalCost(window):\r\n window.destroy()\r\n TotalCostGUI()\r\n \r\ndef getID(val):\r\n global Id\r\n rem=val\r\n Id=val[0]\r\n \r\ndef calculate():\r\n global t,e1,rowCount\r\n spent=0\r\n rec=0\r\n\r\n #Price of the event\r\n f9=\"SELECT CHARGE FROM EVENTS WHERE E_ID=?\"\r\n cursor.execute(f9,[(e1.get())])\r\n val13=cursor.fetchone()\r\n price=val13[0]\r\n print(\"Price: \",price)\r\n\r\n #Charges for guests\r\n f1=(\"SELECT SUM(CHARGE) FROM GUEST_EVENT WHERE E_ID=?\")\r\n cursor.execute(f1,[(e1.get())])\r\n val=cursor.fetchone() \r\n val1=val[0]\r\n spent=spent+val1\r\n\r\n #Charges for resources\r\n f2=(\"SELECT R_ID,QUANTITY FROM RESOURCE_EVENT WHERE E_ID=?\")\r\n cursor.execute(f2,[(e1.get())])\r\n val2=cursor.fetchall() \r\n for row in val2:\r\n f3=(\"SELECT PRICE FROM RESOURCES WHERE R_ID=?\")\r\n cursor.execute(f3,[(row[0])])\r\n val3=cursor.fetchone()\r\n val4=val3[0]\r\n spent=spent+(val4*row[1])\r\n\r\n #Money recieved from college audience\r\n f4=(\"SELECT COUNT(A_ID) FROM CAUDIENCE_EVENT WHERE E_ID=?\")\r\n cursor.execute(f4,[(e1.get())])\r\n val5=cursor.fetchone()\r\n val6=val5[0]\r\n val14=price*val6\r\n print(\"college: \",val14)\r\n rec=rec+val14\r\n\r\n #Money recieved from external audience\r\n f5=(\"SELECT COUNT(EA_ID) FROM EAUDIENCE_EVENT WHERE E_ID=?\")\r\n cursor.execute(f5,[(e1.get())])\r\n val7=cursor.fetchone()\r\n val8=val7[0]\r\n val15=val8*price\r\n rec=rec+val15\r\n print(\"external: \",val15)\r\n totalspent=spent-rec\r\n f6=(\"SELECT BUDGET FROM EVENTS WHERE E_ID=?\")\r\n cursor.execute(f6,[(e1.get())])\r\n val9=cursor.fetchone()\r\n val10=val9[0]\r\n f9=(\"SELECT C_ID FROM EVENTS WHERE E_ID=?\")\r\n cursor.execute(f9,[(e1.get())])\r\n val13=cursor.fetchone()\r\n cid=val13[0]\r\n f7=(\"SELECT FUNDS FROM COMMITTEE WHERE C_ID=?\")\r\n cursor.execute(f7,[(cid)])\r\n val11=cursor.fetchone()\r\n val12=val11[0]\r\n saved=val10-totalspent\r\n finBudget=val12+saved\r\n f8=(\"UPDATE COMMITTEE SET FUNDS=? WHERE C_ID=?\")\r\n cursor.execute(f8,[(finBudget),(cid)])\r\n str1=\"THE TOTAL MONEY SPENT FOR THIS EVENT(RESOURCE + GUEST CHARGERS) IS: RS. \"+str(spent)\r\n str2=\"THE TOTAL MONEY RECIEVED FROM AUDIENCE: RS. \"+str(rec)\r\n str3=\"MONEY REMAINING IN EVENT BUDGET: RS. 
\"+str(saved)\r\n str5=\"THE UPDATED FUNDS OF THE COMMITTEE IS NOW : RS. \"+str(finBudget)\r\n str4=\"CURRENT ORIGINAL FUNDS OF THE COMMITTEE: RS. \"+str(val12)\r\n rowCount=rowCount+1\r\n l2=Label(t,text=str1,font=\"Times 12 bold\",bg=\"black\",foreground=\"snow\").grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n l3=Label(t,text=str2,font=\"Times 12 bold\",bg=\"black\",foreground=\"snow\").grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n l4=Label(t,text=str3,font=\"Times 12 bold\",bg=\"black\",foreground=\"snow\").grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n l5=Label(t,text=str4,font=\"Times 12 bold\",bg=\"black\",foreground=\"snow\").grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n l6=Label(t,text=str5,font=\"Times 12 bold\",bg=\"black\",foreground=\"snow\").grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n\r\ndef TotalCostGUI():\r\n global e1,t,Id,rowCount\r\n t=Tk()\r\n rowCount=4\r\n t.configure(bg=\"black\")\r\n t.title(\"Calculating Total Cost\")\r\n l=Label(t,text=\"HERE YOU CAN DO THE COST EVALUATION OF THE EVENT!\",font=\"Times 16 bold\",bg=\"black\",foreground=\"snow\")\r\n l.grid(row=0,column=0)\r\n la=Label(t,text=\"The event available for your committee are: \",font=\"Times 12 italic\",bg=\"black\",foreground=\"snow\").grid(row=1,column=0)\r\n lid=Label(t,text=\"ID\",font=\"Times 12 italic\",bg=\"black\",foreground=\"snow\").grid(row=2,column=0)\r\n lname=Label(t,text=\"Name\",font=\"Times 12 italic\",bg=\"black\",foreground=\"snow\").grid(row=2,column=1)\r\n lyear=Label(t,text=\"Year\",font=\"Times 12 italic\",bg=\"black\",foreground=\"snow\").grid(row=2,column=2)\r\n f=(\"SELECT E_ID,NAME,YEAR FROM EVENTS WHERE C_ID=?\")\r\n cursor.execute(f,[(Id)])\r\n for row in cursor.fetchall():\r\n for i in range(0,3):\r\n lt=Label(t,text=row[i],font=\"Times 12 bold\",bg=\"black\",foreground=\"snow\").grid(row=rowCount,column=i)\r\n rowCount=rowCount+1\r\n rowCount=rowCount+1 \r\n l1=Label(t,text=\"Enter the event id whose cost is to be calculated: \",font=\"Times 15 italic\",bg=\"black\",foreground=\"snow\").grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n e1=Entry(t)\r\n e1.grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n b1=Button(t,text=\"Submit\",font=\"Times 10\",activebackground=\"red\",command=calculate).grid(row=rowCount,column=0)\r\n rowCount=rowCount+1\r\n\r\n \r\n\r\n \r\n", "repo_name": "vaishnavi-ui/event-database-management", "sub_path": "Total_Cost.py", "file_name": "Total_Cost.py", "file_ext": "py", "file_size_in_byte": 4751, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "sqlite3.connect", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "20641214413", "text": "import itertools\n\nproducts = ['pi', 'wa', 'sn', 'sh']\nrates = {'pi-pi': 1, 'pi-wa': 0.5, 'pi-sn': 1.45, 'pi-sh': 0.75, 'wa-pi': 1.95, 'wa-wa': 1, 'wa-sn': 3.1, 'wa-sh': 1.49, 'sn-pi': 0.67, 'sn-wa': 0.31, 'sn-sn': 1, 'sn-sh': 0.48, 'sh-pi': 1.34, 'sh-wa': 0.64, 'sh-sn': 1.98, 'sh-sh': 1}\n\n\ncombos = [x for x in itertools.product(products, repeat=4)]\nmoney = 2000000\ncombs = []\n\nfor combo in combos:\n combo = ('sh',) + combo + ('sh',)\n combs.append(combo)\n\nmoneys = []\n\nfor combo in combs:\n for i in range(len(combo)):\n if i != len(combo) - 1:\n money *= rates[f'{combo[i]}-{combo[i+1]}']\n print(combo, money)\n moneys.append(money)\n money = 2000000\n\nprint(max(moneys))", "repo_name": "naman-doshi/IMC-Prosperity", "sub_path": "brute-manual-trading/main.py", 
"file_name": "main.py", "file_ext": "py", "file_size_in_byte": 703, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "itertools.product", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "42337942035", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom .forms import PostForm, CommentForm, ReplyForm\nfrom .models import Post, Comment, Reply\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\n\n# Create your views here.\n\n@login_required\ndef createPost(request):\n User = get_user_model()\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n username = request.POST['username']\n userSelected = User.objects.get(username=username)\n post = form.save(commit=False)\n post.user = userSelected\n post.save()\n return redirect('blog:postDetail', pk=Post.objects.latest('pk').pk)\n else:\n form = PostForm()\n return render(request, 'blog/createPost.html', {'form':form, 'formName':'Create'})\n\n@login_required\ndef createComment(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.POST['username']\n comment.post = post\n comment.save()\n return redirect('blog:postDetail', pk=post.pk)\n else:\n form = CommentForm()\n return render(request, 'blog/commentForm.html', {'form':form, 'formName':'Comment'})\n\ndef postList(request):\n data = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return render(request, 'blog/postList.html', {'data':data})\n\ndef postDraft(request):\n data = Post.objects.filter(published_date__isnull=True).order_by('create_date')\n return render(request, 'blog/postDraft.html', {'data':data})\n\n@login_required\ndef commentApprove(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n comment.approve()\n return redirect('blog:postDetail', pk=comment.post.pk)\n\n@login_required\ndef commentRemove(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n postPK = comment.post.pk\n comment.delete()\n return redirect('blog:postDetail', pk=postPK)\n\ndef postDetail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/postDetail.html', {'post':post})\n\n@login_required\ndef postEdit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n post.title = form.cleaned_data['title']\n post.message = form.cleaned_data['message']\n post.image = form.cleaned_data['image']\n post.save()\n return redirect('blog:postDetail', pk=pk)\n else:\n form = PostForm(initial = {\n 'title' : post.title, 'message' : post.message\n })\n return render(request, 'blog/createPost.html', {'form':form, 'formName':'Edit', 'postValue':post})\n\n@login_required\ndef postDelete(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.delete()\n return redirect('blog:postList')\n\n@login_required\ndef postPublish(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.published()\n return redirect('blog:postDetail', pk=pk)\n\ndef userPosts(request, userName):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return render(request, 'blog/userPosts.html', {'posts':posts, 
'userPost':userName})\n\n@login_required\ndef newReply(request, pk):\n commentObj = get_object_or_404(Comment, pk=pk)\n if request.method == \"POST\":\n form = ReplyForm(request.POST)\n if form.is_valid():\n reply = form.save(commit=False)\n reply.comment = commentObj\n reply.author = request.POST['username']\n reply.save()\n return redirect('blog:postDetail', pk=commentObj.post.pk)\n else:\n form = ReplyForm()\n return render(request, 'blog/commentForm.html', {'form':form, 'formName':'Reply'})\n", "repo_name": "hruturaj/django-Blog", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4067, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 13, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Post.objects.latest", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 22, "usage_type": "name"}, {"api_name": "forms.PostForm", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 29, "usage_type": "argument"}, {"api_name": "forms.CommentForm", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "forms.CommentForm", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 43, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 43, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 43, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "models.Post.objects.filter", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Comment", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Comment", "line_number": 58, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 61, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.decorators.login_required", "line_number": 56, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 64, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 69, "usage_type": "argument"}, {"api_name": "forms.PostForm", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 77, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 79, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 67, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 86, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 86, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 84, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 92, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 94, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 90, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 97, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 97, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 97, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 102, "usage_type": "call"}, {"api_name": "models.Comment", "line_number": 102, "usage_type": "argument"}, {"api_name": "forms.ReplyForm", "line_number": 104, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 110, "usage_type": "call"}, {"api_name": "forms.ReplyForm", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "27170207650", "text": "# Loading Libraries\nimport os\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torchtext\nfrom configparser import ConfigParser, ExtendedInterpolation\nfrom modules import ReadParams, DownloadData, ReadData, PrepareData\nfrom modules import TransformText, BuildModel\nprint(\"Libraries loaded\")\n\n# Reading config\nconfig = ConfigParser(interpolation=ExtendedInterpolation())\nconfig.read('train/config.ini')\nprint(\"Configuration loaded\")\n\n# Reading Parameters for the training\nread_params = ReadParams(config)\nconfig, params = read_params.run()\nprint(\"Parameters loaded\")\n\n# Downloading data\ndownload_data = DownloadData(config, params)\ndownload_data.run()\nprint(\"Downloaded data\")\n\n# Reading downloaded data\nread_data = ReadData(config, params)\ntraining_data = 
read_data.run()\nprint(\"Data loaded\")\n\n# Generating training pairs\nprepare_data = PrepareData(config, params)\ntraining_sets = prepare_data.run(training_data)\nprint(\"Training pairs generated\")\n\n# Generates vocabs and transforms data for training\ntransform_text = TransformText(config, params)\n(training_input, vocabs) = transform_text.run(training_sets)\nprint(\"Transformed training data\")\n\n# Creates and trains the model\nbuild_model = BuildModel(config, params)\ncode2doc_train = build_model.run(training_input, vocabs)\nprint(\"Model trained\")\n\n# # Evaluates model\n# eval_model = EvalModel(config, params)\n# model_score = eval_model.run(code2doc_train, training_sets, vocabs)\n# print(\"Model scored on validation data\")\n\n# test model inference\n# from utils.code2doc_utils import Code2DocInfer\n# model_infer = Code2DocInfer(code2doc_train, vocabs)\n# model_infer.predict(enc_input[:10])", "repo_name": "santiagomvc/code2doc", "sub_path": "train/code2doc.py", "file_name": "code2doc.py", "file_ext": "py", "file_size_in_byte": 1646, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "configparser.ConfigParser", "line_number": 13, "usage_type": "call"}, {"api_name": "configparser.ExtendedInterpolation", "line_number": 13, "usage_type": "call"}, {"api_name": "modules.ReadParams", "line_number": 18, "usage_type": "call"}, {"api_name": "modules.DownloadData", "line_number": 23, "usage_type": "call"}, {"api_name": "modules.ReadData", "line_number": 28, "usage_type": "call"}, {"api_name": "modules.PrepareData", "line_number": 33, "usage_type": "call"}, {"api_name": "modules.TransformText", "line_number": 38, "usage_type": "call"}, {"api_name": "modules.BuildModel", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "18968123316", "text": "import sys\nimport os\n\nfrom nltk.corpus import wordnet as wn\n\nfrom mnemmaj import tree_of_splits, explore, parse_digit_to_word_list\n\n\ndummy, width = os.popen('stty size', 'r').read().split()\n\nlookup_table = parse_digit_to_word_list()\n\nseq = sys.argv[1]\n\n# max_parts = int(sys.argv[2])\n\nprint('seq:', seq)\n# print 'max_parts:', max_parts\n\n# tree = tree_of_splits(seq, 1)\n\n# def node_test(node):\n# return (node in lookup_table)\n\ndef iterchunks(iterator, n):\n \"\"\"Iterate returning n results at a time\"\"\"\n iterator = iter(iterator)\n return zip(*([iterator]*n))\n\n# chunks = list(explore(tree, node_test=node_test))[0]\n# print chunks\n# words = [lookup_table[chunk] for chunk in chunks]\n\nwords = lookup_table[seq]\n\nwords = [word for word in words if any((ss.pos == 'n' for ss in wn.synsets(word)))]\n\nmax_word_length = max([len(word) for word in words]) + 3\n\ncols = (int(width) // max_word_length) - 1  # integer division: iterchunks needs an int\n\nfor line in iterchunks(words, cols):\n for word in line:\n print(word.ljust(max_word_length))\n print(\"...\")", "repo_name": "johtso/mnemonic_major", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.popen", "line_number": 9, "usage_type": "call"}, {"api_name": "mnemmaj.parse_digit_to_word_list", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet.synsets", "line_number": 36, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 36, "usage_type": "name"}]} 
+{"seq_id": "70387072310", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .models import Item, Order, OrderItem\nfrom django.views.generic import ListView, DetailView, View\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\n# Create your views here.\nfrom .models import *\n\nfrom .forms import CreateUserForm, CheckoutForm, CouponForm, RefundForm, PaymentForm\nimport random\nimport string\nfrom paypal.standard.forms import PayPalPaymentsForm\nfrom django.views.decorators.csrf import csrf_exempt\nfrom decimal import *\n\"\"\" import stripe \"\"\"\n\"\"\" stripe.api_key = settings.STRIPE_SECRET_KE \"\"\"\n\"\"\" from .filters import OrderFilter \"\"\"\n\n\ndef create_ref_code():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\n\n\ndef home(request):\n\n items = Item.objects.all()\n return render(request, 'home-page.html', {'items': items})\n\n\ndef product(request):\n\n # dests = Destination.objects.all()\n return render(request, \"product-page.html\")\n\n\n\"\"\" def checkout(request):\n\n # dests = Destination.objects.all()\n return render(request, \"checkout-page.html\")\n \"\"\"\n\n\ndef registerPage(request):\n if request.user.is_authenticated:\n return redirect('home')\n else:\n form = CreateUserForm()\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n form.save()\n user = form.cleaned_data.get('username')\n messages.success(request, 'Account was created for ' + user)\n\n return redirect('login')\n\n context = {'form': form}\n return render(request, 'register.html', context)\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\ndef loginPage(request):\n if request.user.is_authenticated:\n return redirect('/')\n else:\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('/')\n else:\n messages.info(request, 'Username OR password is incorrect')\n\n context = {}\n return render(request, 'login.html', context)\n\n\nclass ItemDetailView(DetailView):\n model = Item\n template_name = \"product-page.html\"\n\n\nclass RequestRefundView(View):\n pass\n\n\n@login_required\ndef add_to_cart(request, slug):\n item = get_object_or_404(Item, slug=slug)\n order_item, created = OrderItem.objects.get_or_create(\n item=item,\n user=request.user,\n ordered=False\n )\n order_qs = Order.objects.filter(user=request.user, ordered=False)\n if order_qs.exists():\n order = order_qs[0]\n # check if the order item is in the order\n if order.items.filter(item__slug=item.slug).exists():\n order_item.quantity += 1\n order_item.save()\n messages.info(request, \"This item quantity was updated.\")\n return redirect(\"order-summary\")\n\n else:\n order.items.add(order_item)\n messages.info(request, \"This item was added to your cart.\")\n return redirect(\"order-summary\")\n\n else:\n ordered_date = timezone.now()\n order = Order.objects.create(\n user=request.user, ordered_date=ordered_date)\n order.items.add(order_item)\n 
messages.info(request, \"This item was added to your cart.\")\n return redirect(\"order-summary\")\n\n\n@login_required\ndef remove_from_cart(request, slug):\n item = get_object_or_404(Item, slug=slug)\n order_qs = Order.objects.filter(\n user=request.user,\n ordered=False\n )\n if order_qs.exists():\n order = order_qs[0]\n # check if the order item is in the order\n if order.items.filter(item__slug=item.slug).exists():\n order_item = OrderItem.objects.filter(\n item=item,\n user=request.user,\n ordered=False\n )[0]\n order.items.remove(order_item)\n order_item.delete()\n messages.info(request, \"This item was removed from your cart.\")\n return redirect(\"order-summary\")\n else:\n messages.info(request, \"This item was not in your cart\")\n return redirect(\"product\", slug=slug)\n else:\n messages.info(request, \"You do not have an active order\")\n return redirect(\"product\", slug=slug)\n\n\n@login_required\ndef remove_single_item_from_cart(request, slug):\n item = get_object_or_404(Item, slug=slug)\n order_qs = Order.objects.filter(\n user=request.user,\n ordered=False\n )\n if order_qs.exists():\n order = order_qs[0]\n # check if the order item is in the order\n if order.items.filter(item__slug=item.slug).exists():\n order_item = OrderItem.objects.filter(\n item=item,\n user=request.user,\n ordered=False\n )[0]\n if order_item.quantity > 1:\n order_item.quantity -= 1\n order_item.save()\n else:\n order.items.remove(order_item)\n messages.info(request, \"This item quantity was updated.\")\n return redirect(\"order-summary\")\n else:\n messages.info(request, \"This item was not in your cart\")\n return redirect(\"product\", slug=slug)\n else:\n messages.info(request, \"You do not have an active order\")\n return redirect(\"product\", slug=slug)\n\n\nclass OrderSummaryView(LoginRequiredMixin, View):\n def get(self, *args, **kwargs):\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n context = {\n 'object': order\n }\n return render(self.request, 'order_summary.html', context)\n except ObjectDoesNotExist:\n messages.warning(self.request, \"You do not have an active order\")\n return redirect(\"/\")\n\n\nclass CheckoutView(View):\n def get(self, *args, **kwargs):\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n form = CheckoutForm()\n context = {\n 'form': form,\n 'couponform': CouponForm(),\n 'order': order,\n 'DISPLAY_COUPON_FORM': True\n }\n\n shipping_address_qs = Address.objects.filter(\n user=self.request.user,\n address_type='S',\n default=True\n )\n if shipping_address_qs.exists():\n context.update(\n {'default_shipping_address': shipping_address_qs[0]})\n\n billing_address_qs = Address.objects.filter(\n user=self.request.user,\n address_type='B',\n default=True\n )\n if billing_address_qs.exists():\n context.update(\n {'default_billing_address': billing_address_qs[0]})\n\n return render(self.request, \"checkout.html\", context)\n except ObjectDoesNotExist:\n messages.info(self.request, \"You do not have an active order\")\n return redirect(\"checkout\")\n\n def post(self, *args, **kwargs):\n form = CheckoutForm(self.request.POST or None)\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n if form.is_valid():\n\n use_default_shipping = form.cleaned_data.get(\n 'use_default_shipping')\n if use_default_shipping:\n print(\"Using the defualt shipping address\")\n address_qs = Address.objects.filter(\n user=self.request.user,\n address_type='S',\n default=True\n )\n if address_qs.exists():\n shipping_address = 
address_qs[0]\n order.shipping_address = shipping_address\n order.save()\n else:\n messages.info(\n self.request, \"No default shipping address available\")\n return redirect('checkout')\n else:\n print(\"User is entering a new shipping address\")\n shipping_address1 = form.cleaned_data.get(\n 'shipping_address')\n shipping_address2 = form.cleaned_data.get(\n 'shipping_address2')\n shipping_country = form.cleaned_data.get(\n 'shipping_country')\n shipping_zip = form.cleaned_data.get('shipping_zip')\n\n if is_valid_form([shipping_address1, shipping_country, shipping_zip]):\n shipping_address = Address(\n user=self.request.user,\n street_address=shipping_address1,\n apartment_address=shipping_address2,\n country=shipping_country,\n zip=shipping_zip,\n address_type='S'\n )\n shipping_address.save()\n\n order.shipping_address = shipping_address\n order.save()\n\n set_default_shipping = form.cleaned_data.get(\n 'set_default_shipping')\n if set_default_shipping:\n shipping_address.default = True\n shipping_address.save()\n\n else:\n messages.info(\n self.request, \"Please fill in the required shipping address fields\")\n\n use_default_billing = form.cleaned_data.get(\n 'use_default_billing')\n same_billing_address = form.cleaned_data.get(\n 'same_billing_address')\n\n if same_billing_address:\n billing_address = shipping_address\n billing_address.pk = None\n billing_address.save()\n billing_address.address_type = 'B'\n billing_address.save()\n order.billing_address = billing_address\n order.save()\n\n elif use_default_billing:\n print(\"Using the default billing address\")\n address_qs = Address.objects.filter(\n user=self.request.user,\n address_type='B',\n default=True\n )\n if address_qs.exists():\n billing_address = address_qs[0]\n order.billing_address = billing_address\n order.save()\n else:\n messages.info(\n self.request, \"No default billing address available\")\n return redirect('checkout')\n else:\n print(\"User is entering a new billing address\")\n billing_address1 = form.cleaned_data.get(\n 'billing_address')\n billing_address2 = form.cleaned_data.get(\n 'billing_address2')\n billing_country = form.cleaned_data.get(\n 'billing_country')\n billing_zip = form.cleaned_data.get('billing_zip')\n\n if is_valid_form([billing_address1, billing_country, billing_zip]):\n billing_address = Address(\n user=self.request.user,\n street_address=billing_address1,\n apartment_address=billing_address2,\n country=billing_country,\n zip=billing_zip,\n address_type='B'\n )\n billing_address.save()\n\n order.billing_address = billing_address\n order.save()\n\n set_default_billing = form.cleaned_data.get(\n 'set_default_billing')\n if set_default_billing:\n billing_address.default = True\n billing_address.save()\n\n else:\n messages.info(\n self.request, \"Please fill in the required billing address fields\")\n\n payment_option = form.cleaned_data.get('payment_option')\n\n if payment_option == 'P':\n return redirect('payment')\n \n elif payment_option == 'C':\n return redirect('paymentbycash')\n else:\n messages.warning(\n self.request, \"Invalid payment option selected\")\n return redirect('checkout')\n except ObjectDoesNotExist:\n messages.warning(self.request, \"You do not have an active order\")\n return redirect(\"order-summary\")\n\n\ndef is_valid_form(values):\n valid = True\n for field in values:\n if field == '':\n valid = False\n return valid\n\n\ndef get_coupon(request, code):\n try:\n coupon = Coupon.objects.get(code=code)\n return coupon\n except ObjectDoesNotExist:\n messages.info(request, 
\"This coupon does not exist\")\n return redirect('checkout')\n\n\nclass AddCouponView(View):\n def post(self, *args, **kwargs):\n form = CouponForm(self.request.POST or None)\n if form.is_valid():\n try:\n code = form.cleaned_data.get('code')\n order = Order.objects.get(\n user=self.request.user, ordered=False)\n order.coupon = get_coupon(self.request, code)\n order.save()\n messages.success(self.request, \"Successfully added coupon\")\n return redirect(\"checkout\")\n except ObjectDoesNotExist:\n messages.info(self.request, \"You do not have an active order\")\n return redirect(\"checkout\")\n\n\nclass PaymentView(View):\n\n def get(self, *args, **kwargs):\n order = Order.objects.get(user=self.request.user, ordered=False)\n #messages.success(self.request, \"Your order was successful!\")\n # create the payment\n payment = Payment()\n # payment.stripe_charge_id = charge['id']\n payment.user = self.request.user\n payment.amount = order.get_total()\n payment.save()\n# assign the payment to the order\n order_items = order.items.all()\n order_items.update(ordered=True)\n for item in order_items:\n item.save()\n order.ordered = True\n order.payment = payment\n order.ref_code = create_ref_code()\n order.save()\n messages.success(self.request, \"Your order was second successful!\")\n return redirect('/')\n\n\ndef process_payment(request):\n order_id = create_ref_code() # an order id created by our own function\n order = Order.objects.get(user=request.user, ordered=False) #User order object\n total = order.get_total() #Total amount to be pain by paypal\n host = request.get_host() # user name to be recorded during transaction\n paypal_dict = {\n 'business': settings.PAYPAL_RECEIVER_EMAIL, 'amount': total, 'item_name': 'Order {}'.format(order.id), 'invoice': str(order.id), 'currency_code': 'USD', 'notify_url': 'http://{}{}'.format(host, reverse('paypal-ipn')), 'return_url': 'http://{}{}'.format(host, reverse('payment_done')), 'cancel_return': 'http://{}{}'.format(host, reverse('payment_cancelled')), }\n form = PayPalPaymentsForm(initial=paypal_dict)\n return render(request, 'process_payment.html', {'order': order, 'form': form})\n\n\n@csrf_exempt\ndef payment_done(request):\n return render(request, 'payment_done.html')\n\n\n@csrf_exempt\ndef payment_canceled(request):\n return render(request, 'payment_cancelled.html')\n", "repo_name": "johnbangla/showcasephase1", "sub_path": "app1/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 16731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "random.choices", "line_number": 28, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 28, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Item.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 33, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "forms.CreateUserForm", "line_number": 54, "usage_type": "call"}, {"api_name": "forms.CreateUserForm", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 
60, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 60, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 62, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 65, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 75, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 81, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 87, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}, {"api_name": "django.views.generic.DetailView", "line_number": 93, "usage_type": "name"}, {"api_name": "models.Item", "line_number": 94, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 98, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 104, "usage_type": "argument"}, {"api_name": "models.OrderItem.objects.get_or_create", "line_number": 105, "usage_type": "call"}, {"api_name": "models.OrderItem.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.OrderItem", "line_number": 105, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 110, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 117, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 122, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 122, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 123, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 126, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 126, "usage_type": "name"}, {"api_name": "models.Order.objects.create", "line_number": 127, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 127, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 130, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 130, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 131, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 102, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 136, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 136, "usage_type": "argument"}, {"api_name": "models.Order.objects.filter", "line_number": 137, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 137, 
"usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 137, "usage_type": "name"}, {"api_name": "models.OrderItem.objects.filter", "line_number": 145, "usage_type": "call"}, {"api_name": "models.OrderItem.objects", "line_number": 145, "usage_type": "attribute"}, {"api_name": "models.OrderItem", "line_number": 145, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 152, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 152, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 153, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 155, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 155, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 156, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 158, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 158, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 159, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 134, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 164, "usage_type": "argument"}, {"api_name": "models.Order.objects.filter", "line_number": 165, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 165, "usage_type": "name"}, {"api_name": "models.OrderItem.objects.filter", "line_number": 173, "usage_type": "call"}, {"api_name": "models.OrderItem.objects", "line_number": 173, "usage_type": "attribute"}, {"api_name": "models.OrderItem", "line_number": 173, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 183, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 183, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 184, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 186, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 186, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 187, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 189, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 189, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 190, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 162, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 193, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 193, "usage_type": "name"}, {"api_name": "models.Order.objects.get", "line_number": 196, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 196, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 196, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 200, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 201, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 202, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 202, "usage_type": 
"name"}, {"api_name": "django.shortcuts.redirect", "line_number": 203, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 206, "usage_type": "name"}, {"api_name": "models.Order.objects.get", "line_number": 209, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 209, "usage_type": "name"}, {"api_name": "forms.CheckoutForm", "line_number": 210, "usage_type": "call"}, {"api_name": "forms.CouponForm", "line_number": 213, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 236, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 237, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 238, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 238, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 239, "usage_type": "call"}, {"api_name": "forms.CheckoutForm", "line_number": 242, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 244, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 244, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 244, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 261, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 261, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 263, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 295, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 295, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 324, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 324, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 326, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 358, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 358, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 364, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 367, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 369, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 369, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 371, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 372, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 373, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 373, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 374, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 389, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 390, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 390, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 391, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 394, "usage_type": "name"}, {"api_name": "forms.CouponForm", "line_number": 396, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 400, 
"usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 400, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 400, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 404, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 404, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 405, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 406, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 407, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 407, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 408, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 411, "usage_type": "name"}, {"api_name": "models.Order.objects.get", "line_number": 414, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 414, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 414, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 431, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 431, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 432, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 437, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 437, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 437, "usage_type": "name"}, {"api_name": "paypal.standard.forms.PayPalPaymentsForm", "line_number": 442, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 443, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 448, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 446, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 453, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 451, "usage_type": "name"}]} +{"seq_id": "3045338075", "text": "import concurrent\nimport json\nimport math\nimport warnings\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Any, Dict, List, Optional\n\nimport numpy\nimport pyarrow.flight as flight\nfrom pandas import DataFrame\nfrom pyarrow import Table\nfrom tqdm.auto import tqdm\n\nfrom .graph_constructor import GraphConstructor\n\n\nclass ArrowGraphConstructor(GraphConstructor):\n def __init__(\n self,\n database: str,\n graph_name: str,\n flight_client: flight.FlightClient,\n concurrency: int,\n undirected_relationship_types: Optional[List[str]],\n chunk_size: int = 10_000,\n ):\n self._database = database\n self._concurrency = concurrency\n self._graph_name = graph_name\n self._client = flight_client\n self._undirected_relationship_types = (\n [] if undirected_relationship_types is None else undirected_relationship_types\n )\n self._chunk_size = chunk_size\n self._min_batch_size = chunk_size * 10\n\n def run(self, node_dfs: List[DataFrame], relationship_dfs: List[DataFrame]) -> None:\n try:\n config: Dict[str, Any] = {\n \"name\": self._graph_name,\n \"database_name\": self._database,\n }\n\n if self._undirected_relationship_types:\n config[\"undirected_relationship_types\"] = self._undirected_relationship_types\n\n self._send_action(\n \"CREATE_GRAPH\",\n config,\n )\n\n self._send_dfs(node_dfs, \"node\")\n\n 
self._send_action(\"NODE_LOAD_DONE\", {\"name\": self._graph_name})\n\n self._send_dfs(relationship_dfs, \"relationship\")\n\n self._send_action(\"RELATIONSHIP_LOAD_DONE\", {\"name\": self._graph_name})\n except (Exception, KeyboardInterrupt) as e:\n self._send_action(\"ABORT\", {\"name\": self._graph_name})\n\n raise e\n\n def _partition_dfs(self, dfs: List[DataFrame]) -> List[DataFrame]:\n partitioned_dfs: List[DataFrame] = []\n\n for df in dfs:\n num_rows = df.shape[0]\n num_batches = math.ceil(num_rows / self._min_batch_size)\n\n # pandas 2.1.0 deprecates swapaxes, but numpy did not catch up yet.\n warnings.filterwarnings(\n \"ignore\",\n message=(\n r\"^'DataFrame.swapaxes' is deprecated and will be removed in a future version. \"\n + r\"Please use 'DataFrame.transpose' instead.$\"\n ),\n )\n partitioned_dfs += numpy.array_split(df, num_batches) # type: ignore\n\n return partitioned_dfs\n\n def _send_action(self, action_type: str, meta_data: Dict[str, Any]) -> None:\n result = self._client.do_action(flight.Action(action_type, json.dumps(meta_data).encode(\"utf-8\")))\n\n # Consume result fully to sanity check and avoid cancelled streams\n collected_result = list(result)\n assert len(collected_result) == 1\n\n json.loads(collected_result[0].body.to_pybytes().decode())\n\n def _send_df(self, df: DataFrame, entity_type: str, pbar: tqdm) -> None:\n table = Table.from_pandas(df)\n batches = table.to_batches(self._chunk_size)\n flight_descriptor = {\"name\": self._graph_name, \"entity_type\": entity_type}\n\n # Write schema\n upload_descriptor = flight.FlightDescriptor.for_command(json.dumps(flight_descriptor).encode(\"utf-8\"))\n writer, _ = self._client.do_put(upload_descriptor, table.schema)\n\n with writer:\n # Write table in chunks\n for partition in batches:\n writer.write_batch(partition)\n pbar.update(partition.num_rows)\n\n def _send_dfs(self, dfs: List[DataFrame], entity_type: str) -> None:\n desc = \"Uploading Nodes\" if entity_type == \"node\" else \"Uploading Relationships\"\n pbar = tqdm(total=sum([df.shape[0] for df in dfs]), unit=\"Records\", desc=desc)\n\n partitioned_dfs = self._partition_dfs(dfs)\n\n with ThreadPoolExecutor(self._concurrency) as executor:\n futures = [executor.submit(self._send_df, df, entity_type, pbar) for df in partitioned_dfs]\n\n for future in concurrent.futures.as_completed(futures):\n if not future.exception():\n continue\n raise future.exception() # type: ignore\n", "repo_name": "neo4j/graph-data-science-client", "sub_path": "graphdatascience/query_runner/arrow_graph_constructor.py", "file_name": "arrow_graph_constructor.py", "file_ext": "py", "file_size_in_byte": 4385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 145, "dataset": "github-code", "pt": "94", "api": [{"api_name": "graph_constructor.GraphConstructor", "line_number": 17, "usage_type": "name"}, {"api_name": "pyarrow.flight.FlightClient", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pyarrow.flight", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 37, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": 
"pandas.DataFrame", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 69, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 79, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 83, "usage_type": "name"}, {"api_name": "pyarrow.flight.Action", "line_number": 84, "usage_type": "call"}, {"api_name": "pyarrow.flight", "line_number": 84, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 84, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "name"}, {"api_name": "tqdm.auto.tqdm", "line_number": 92, "usage_type": "name"}, {"api_name": "pyarrow.Table.from_pandas", "line_number": 93, "usage_type": "call"}, {"api_name": "pyarrow.Table", "line_number": 93, "usage_type": "name"}, {"api_name": "pyarrow.flight.FlightDescriptor.for_command", "line_number": 98, "usage_type": "call"}, {"api_name": "pyarrow.flight.FlightDescriptor", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pyarrow.flight", "line_number": 98, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 98, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 107, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "name"}, {"api_name": "tqdm.auto.tqdm", "line_number": 109, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 113, "usage_type": "call"}, {"api_name": "concurrent.futures.as_completed", "line_number": 116, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 116, "usage_type": "attribute"}]} +{"seq_id": "34904931351", "text": "import asyncio\nimport datetime\nimport json\nimport os\nimport sys\nfrom hashlib import md5\nfrom itertools import islice\n\nimport aiohttp\nfrom dotenv import load_dotenv\n\nload_dotenv(\"credentials.env\")\n\nAPI_KEY = os.environ[\"LASTFM_API_KEY\"]\nAPI_SECRET = os.environ[\"LASTFM_API_SECRET\"]\nUSERNAME = os.environ[\"LASTFM_USERNAME\"]\nPASSWORD = os.environ[\"LASTFM_PASSWORD\"]\n\n\nasync def lastfm_login():\n data = await lastfm_request(\n {\n \"method\": \"auth.getMobileSession\",\n \"username\": USERNAME,\n \"password\": PASSWORD,\n }\n )\n try:\n session = data[\"session\"][\"key\"]\n print(\"Logged in as\", data[\"session\"][\"name\"])\n return session\n except KeyError:\n print(\"Could not get session\")\n print(data)\n\n\ndef sign_call(params):\n signature = \"\"\n for p in sorted(params):\n signature += p\n signature += params[p]\n signature += API_SECRET\n signature = md5(signature.encode(\"utf-8\")).hexdigest()\n return signature\n\n\nasync def scrobble(session_key, track_data, timestamp: datetime.datetime):\n session = aiohttp.ClientSession()\n for chunk_n, tracks in enumerate(chunk(track_data, 50)):\n params = {\"sk\": session_key, \"method\": \"track.scrobble\"}\n for i, track in enumerate(tracks):\n params[f\"artist[{i}]\"] = track[\"artist\"]\n params[f\"track[{i}]\"] = track[\"track\"]\n params[f\"album[{i}]\"] = track[\"album\"]\n params[f\"timestamp[{i}]\"] = str(\n int(timestamp.timestamp()) + chunk_n * 50 * 60 + 60 * i\n )\n\n data = await lastfm_request(params, 
session)\n print(\"Scrobbling chunk\", chunk_n)\n try:\n print(data[\"scrobbles\"][\"@attr\"])\n for scrobble in data[\"scrobbles\"][\"scrobble\"]:\n ignored = scrobble[\"ignoredMessage\"]\n if ignored[\"code\"] != \"0\":\n print(scrobble[\"artist\"][\"#text\"], \"-\", scrobble[\"track\"][\"#text\"], ignored)\n except KeyError as e:\n print(e)\n print(data)\n\n # close opened session\n await session.close()\n\n\ndef chunk(it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n\nasync def lastfm_request(params: dict, session: aiohttp.ClientSession | None = None):\n\n base_url = \"https://ws.audioscrobbler.com/2.0\"\n params.update({\"api_key\": API_KEY})\n params.update(\n {\n \"api_sig\": sign_call(params),\n \"format\": \"json\",\n }\n )\n new_session = False\n if session is None:\n session = aiohttp.ClientSession()\n new_session = True\n\n async with session.post(url=base_url, params=params) as response:\n data = await response.json()\n\n if new_session:\n await session.close()\n\n # print(data)\n return data\n\n\nasync def main(filename):\n session_key = await lastfm_login()\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n timestamp = datetime.datetime.now() - datetime.timedelta(13)\n print(f\"Starting to scrobble {len(data)} tracks with timestamp set to {timestamp} ...\")\n if \"-v\" in sys.argv:\n for track in data:\n print(\n \"--->\",\n datetime.datetime.fromtimestamp(track[\"timestamp\"]),\n track[\"artist\"],\n \"-\",\n track[\"track\"],\n )\n await scrobble(session_key, data, timestamp)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2 or sys.argv[1] in [\"-h\", \"--help\"]:\n print(\"Scrobble a converted json file generated by convert.py\")\n print()\n print(\"Usage:\")\n print(\"\\tpython scrobble.py [path/to/json] [-v]\")\n quit(1)\n asyncio.run(main(sys.argv[1]))\n", "repo_name": "joinemm/spotify-to-lastfm", "sub_path": "scrobble.py", "file_name": "scrobble.py", "file_ext": "py", "file_size_in_byte": 3794, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "94", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 48, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 77, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 80, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 92, "usage_type": "call"}, {"api_name": "json.load", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 109, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 109, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 111, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 115, "usage_type": "attribute"}, {"api_name": 
"sys.argv", "line_number": 124, "usage_type": "attribute"}, {"api_name": "asyncio.run", "line_number": 130, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "26140316938", "text": "from multiprocessing import Pool\nfrom multiprocessing import Manager\nimport subprocess\n\nclass AsyncTask:\n\n def __init__(self, name, command, queue):\n self.command = command\n self.queue = queue\n self.name = name\n\n def execute(self):\n print(f\"Starting Task: {self.name}\")\n\n try:\n result = subprocess.check_output(self.command.split(), stderr=subprocess.STDOUT)\n\n\n self.queue.put(result.decode())\n\n except Exception as e:\n self.queue.put(str(e))\n pass\n\n print(f\"Finished Task: {self.name}\")\n\n\n\nclass BenchmarkCommandBuilder:\n\n def __init__(self):\n self._stats_filename = \"RISCV.txt\"\n self._config_filename= \"CONFIG.ini\"\n self._l1i_size = 32\n self._l1i_assoc = 4\n self._l1d_size = 32\n self._l1d_assoc = 4\n self._cacheline_size = 64\n self._l2cache = False\n self._l2_size = 1024\n self._l2_assoc = 8\n self._cpu_type = \"DerivO3CPU\"\n self._maxinsts=100000000\n self._benchmark_path= \"../ML_Benchmark/Benchmarks/mlbench\"\n\n def set_l2cache(self, value: bool):\n self._l2cache = value\n return self\n\n def set_stats_filename(self, value):\n self._stats_filename = value\n return self\n\n def set_config_filename(self, value):\n self._config_filename = value\n return self\n\n def set_l1i_size(self, value):\n self._l1i_size = value\n return self\n\n def set_l1i_assoc(self, value):\n self._l1i_assoc = value\n return self\n\n def set_l1d_size(self, value):\n self._l1d_size = value\n return self\n\n def set_l1d_assoc(self, value):\n self._l1d_assoc = value\n return self\n\n def set_cacheline_size(self, value):\n self._cacheline_size = value\n return self\n\n def set_l2_size(self, value):\n self._l2_size = value\n return self\n\n def set_l2_assoc(self, value):\n self._l2_assoc = value\n return self\n\n def set_cpu_type(self, value):\n self._cpu_type = value\n return self\n\n def set_maxinsts(self, value):\n self._maxinsts = value\n return self\n\n def set_benchmark_path(self, value):\n self._benchmark_path = value\n return self\n\n def build(self):\n str = \"build/RISCV/gem5.opt \"\n str += f\"--stats-file={self._stats_filename:s} \"\n str += f\"--dump-config={self._config_filename:s} \"\n str += f\"configs/example/se.py \"\n str += f\"-c {self._benchmark_path} \"\n str += f\"--caches \"\n str += f\"--l1i_size={self._l1i_size}kB \"\n str += f\"--l1i_assoc={self._l1d_assoc} \"\n str += f\"--l1d_size={self._l1d_size}kB \"\n str += f\"--l1d_assoc={self._l1d_assoc} \"\n str += f\"--cacheline_size={self._cacheline_size} \"\n if self._l2cache:\n str += f\"--l2cache \"\n str += f\"--l2_size={self._l2_size}kB \"\n str += f\"--l2_assoc={self._l2_assoc} \"\n str += f\"--cpu-clock=1.6GHz \"\n str += f\"--cpu-type={self._cpu_type} \"\n str += f\" -n 1\"\n str += f\" --maxinsts={self._maxinsts} \"\n\n return str\n\n\ndef run_commands_async(commands, max_processes = 2):\n pool = Pool(max_processes)\n manager = Manager()\n queue = manager.Queue()\n\n tasks = []\n for command in commands:\n tasks.append(AsyncTask(command, command, queue))\n\n pool.map(AsyncTask.execute, tasks)\n\n while not queue.empty():\n print(queue.get())\n\n\n", "repo_name": "kmeister/ML_Benchmark", "sub_path": "scripts/benchmark_utils.py", "file_name": "benchmark_utils.py", "file_ext": "py", "file_size_in_byte": 3514, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 4, "dataset": "github-code", "pt": "94", "api": [{"api_name": "subprocess.check_output", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 123, "usage_type": "call"}, {"api_name": "multiprocessing.Manager", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "18860480150", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 27 12:09:03 2021\n\n@author: 13kau\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\n\n# Import the dataset\nmovies = pd.read_csv('ml-1m/movies.dat', sep = '::', header =None, engine='python', encoding= 'latin-1') \n# this dosent contain header So we specify header =None\n# engine='python' --> dataset to work efficient\n\n\nusers = pd.read_csv('ml-1m/users.dat', sep = '::', header =None, engine='python', encoding= 'latin-1')\n# id, gender, age\n\nratings = pd.read_csv('ml-1m/ratings.dat', sep = '::', header =None, engine='python', encoding= 'latin-1')\n# users, movie_id, ratings\n\n# Preparing the training set and the rest set!\ntraining_set = pd.read_csv('ml-100k/u1.base', delimiter='\\t') # has 80000 ratings\n# users, movie_id, ratings\n\n# convert training set to array\ntraining_set = np.array(training_set, dtype='int')\n\n\n# Prepare train and test set\ntest_set = pd.read_csv('ml-100k/u1.test', delimiter='\\t')\ntest_set = np.array(test_set, dtype='int')\n\n# Getting the number of movies and users\nnb_users = int(max(max(training_set[:,0]), max(test_set[:,0]))) # max of max because to ge the max of both test, training!\nnb_movies = int(max(max(training_set[:,1]), max(test_set[:,1])))\n\n# Converting the data into an array with users in lines and movies in column\ndef convert(data):\n new_data = [] #we do list of list, 1st list for 1st user, 2nd list for 2nd user etc\n for id_users in range(1, nb_users + 1):\n id_movies = data[:, 1][data[:,0] == id_users] # movies indexs of all rated movies\n id_ratings = data[:, 2][data[:,0] == id_users]# 1st list --> ratings of the 1st user and so on\n ratings = np.zeros(nb_movies) # list of nb_movies list\n ratings[id_movies - 1] = id_ratings # Replace 0 by 1 , '-1' --> because we have index starting from 0, 'id_ratings' --> update with real ratings\n new_data.append(list(ratings)) # for all the users\n return new_data\n\ntraining_set = convert(training_set)\n\ntest_set = convert(test_set)\n\n# Converting the data into torch tensors\ntraining_set = torch.FloatTensor(training_set)\ntest_set = torch.FloatTensor(test_set)\n\n# Creating the architecture of neural Network\nclass SAE(nn.Module):\n def __init__(self, ):\n super(SAE, self).__init__()\n # Starts withe nb_movies and ends with nb movies... Because the input and output should have same number!\n self.fc1 = nn.Linear(nb_movies, 20) #20 neuron in 1nd hidden layer # Encode\n self.fc2 = nn.Linear(20, 10) #10 neuron in 2nd hidden layer # Encode\n self.fc3 = nn.Linear(10, 20) #10 neuron in 3nd hidden layer # Decode\n self.fc4 = nn.Linear(20, nb_movies) #10 neuron in 4nd hidden layer # Decode\n self.activation = nn.Sigmoid() # Activation Function\n \n # Action that takes place in Network.. ie. 
encoding then decoding, applying the activation at each hidden layer\n    def forward(self, x):\n        x = self.activation(self.fc1(x)) # first encoding: apply the activation to fc1(x)\n        x = self.activation(self.fc2(x))\n        x = self.activation(self.fc3(x)) # first decoding: apply the activation to fc3(x)\n        x = self.fc4(x) # no activation on the output layer\n        return x\n\nsae = SAE()\ncriterion = nn.MSELoss() # Loss\noptimizer = optim.RMSprop(sae.parameters(), lr=0.01, weight_decay=0.5) # lr --> learning rate\n\n# Training the SAE\nnb_epochs = 200\nfor epoch in range(1, nb_epochs + 1): # loop over epochs\n    train_loss = 0\n    s = 0. # float counter of users who rated at least 1 movie, used for the root mean squared error\n    # loop over users\n    for id_user in range(nb_users):\n        input = Variable(training_set[id_user]).unsqueeze(0) # Variable().unsqueeze(0) --> additional batch dimension\n        target = input.clone() # copy of the inputs\n        # to save as much memory as possible, users with 0 ratings are excluded\n        if torch.sum(target.data > 0) > 0:\n            output = sae(input)\n            target.requires_grad = False\n            output[target == 0] = 0\n            loss = criterion(output, target)\n            mean_corrector = nb_movies/float(torch.sum(target.data > 0) + 1e-10)\n            loss.backward()\n            train_loss += np.sqrt(loss.data*mean_corrector)\n            s += 1.\n            optimizer.step()\n    print('epoch: '+str(epoch)+' loss: '+str(train_loss/s))\n\n# Testing the SAE\ntest_loss = 0\ns = 0.\nfor id_user in range(nb_users):\n    input = Variable(training_set[id_user]).unsqueeze(0) # predict test ratings from the training inputs\n    target = Variable(test_set[id_user]).unsqueeze(0)\n    if torch.sum(target.data > 0) > 0:\n        output = sae(input)\n        target.requires_grad = False\n        output[target == 0] = 0\n        loss = criterion(output, target)\n        mean_corrector = nb_movies/float(torch.sum(target.data > 0) + 1e-10)\n        test_loss += np.sqrt(loss.data*mean_corrector)\n        s += 1.\nprint('test loss: '+str(test_loss/s))\n    ", "repo_name": "Kaushik13k/DeepLearning", "sub_path": "_07_autoEncoders.py", "file_name": "_07_autoEncoders.py", "file_ext": "py", "file_size_in_byte": 4988, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 
72, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.optim.RMSprop", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "4543165111", "text": "#Flask app\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_rest import v1, comment, comments, menu, menus, menuType, menuTypes, menuTypeItems\n\napp = Flask(__name__)\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\napp.add_url_rule('/api/v1', 'v1', v1, methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])\napp.add_url_rule('/api/v1/comments', 'get_comments', comment, methods=['GET', 'POST'])\napp.add_url_rule('/api/v1/comments/<int:commentId>', 'comments', comments, methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])\n\napp.add_url_rule('/api/v1/menus', 'get_menus', menu, methods=['GET', 'POST'])\napp.add_url_rule('/api/v1/menus/<int:menuId>', 'menus', menus, methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])\n\napp.add_url_rule('/api/v1/menuTypes', 'get_menuTypes', menuType, methods=['GET', 'POST'])\napp.add_url_rule('/api/v1/menuTypes/<int:typeId>', 'menuTypes', menuTypes, methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])\n\napp.add_url_rule('/api/v1/menuTypeItems/<int:typeId>', 'menuTypeItems', menuTypeItems, methods=['GET'])\n\n@app.route('/')\ndef hello_world():\n return 'Hello from Flask! 
'+__name__+'*** '", "repo_name": "codoCodoG7/codoCodoG7.github.io", "sub_path": "python/flask_app.py", "file_name": "flask_app.py", "file_ext": "py", "file_size_in_byte": 1128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_rest.v1", "line_number": 9, "usage_type": "argument"}, {"api_name": "flask_rest.comment", "line_number": 10, "usage_type": "argument"}, {"api_name": "flask_rest.comments", "line_number": 11, "usage_type": "argument"}, {"api_name": "flask_rest.menu", "line_number": 13, "usage_type": "argument"}, {"api_name": "flask_rest.menus", "line_number": 14, "usage_type": "argument"}, {"api_name": "flask_rest.menuType", "line_number": 16, "usage_type": "argument"}, {"api_name": "flask_rest.menuTypes", "line_number": 17, "usage_type": "argument"}, {"api_name": "flask_rest.menuTypeItems", "line_number": 19, "usage_type": "argument"}]} +{"seq_id": "29646070245", "text": "import argparse\n# import logging\nimport os\nimport time\n\nimport matplotlib.pyplot as plt\n\n# from constants import DATA_HEADER, REGIONS_NUMS\nfrom download import REGIONS_NUMS, DataDownloader\n\n# from utils import PROFILE_LOG_LEVEL\n# from utils.logging import get_stat_logger as gsl\n\n\nparser = argparse.ArgumentParser(description='Get_stat parser')\nparser.add_argument(\"--fig_location\", action=\"store\", default=None,\n help=\"If used, save figure in the given folder.\")\nparser.add_argument(\"--show_figure\", action=\"store_true\", default=None,\n help=\"If set, show window with figure.\")\nparser.add_argument(\"-r\", \"--regions\", action=\"store\", default=None, nargs='+',\n help=\"\")\n\n\ndef touch(path):\n basedir = os.path.dirname(path)\n if not os.path.exists(basedir) and basedir != '':\n os.makedirs(basedir)\n\n with open(path, 'a'):\n os.utime(path, None)\n\n\ndef plot_stat(data_source, fig_location=None, show_figure=False, regions=None):\n \"\"\"\n\n :param regions:\n :param data_source:\n :param fig_location:\n :param show_figure:\n :return:\n \"\"\"\n processed_data = {} # Dictionary of all results.\n fig = plt.figure(figsize=(8.27, 11.69))\n plt.title('Nehodovost v krajích podle roku', size=20)\n plt.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n left=False, # ticks along the top edge are off\n labelbottom=False,\n labelleft=False\n )\n\n # start = time.perf_counter()\n axes = []\n # fig.add_subplot(111)\n for i, year in enumerate(range(16, 21)):\n year_mask = [int(data_source[i][0][-7:-5]) == year for i in range(data_source.shape[0])]\n year_data = data_source[year_mask]\n one_year = {} # Dictionary of result in one year.\n for region in regions:\n region_mask = [year_data[i][-1] == region for i in range(year_data.shape[0])]\n one_year[region] = year_data[region_mask].shape[0]\n processed_data[year] = one_year\n\n # Calculate position\n pairs = [(region, processed_data[year][region]) for region in regions]\n pairs.sort(key=lambda x: x[1], reverse=True)\n\n # Get first elements - regions - sorted in reversed order\n sorted_regions = [pair[0] for pair in pairs]\n\n # Place position values where the region sit at.\n positions = [0 for i in range(len(sorted_regions))]\n\n for index, region in enumerate(regions):\n positions[index] = 
sorted_regions.index(region)\n\n axes.append(fig.add_subplot(int(511 + i)))\n axes[i].set_xlabel(\"Kraje\", size=15)\n axes[i].set_ylabel(\"Počet nehod\", size=15)\n bars = axes[i].bar(processed_data[year].keys(), processed_data[year].values())\n new_top_lim = axes[i].get_ylim()[1] * 1.2\n axes[i].set_ylim(bottom=0, top=new_top_lim)\n # axes[i].grid(b=True, axis='y', which='major', alpha=0.4)\n # axes[i].yaxis.set_minor_locator(MultipleLocator(new_top_lim / 10))\n\n for bar_index, bar in enumerate(bars):\n axes[i].text(\n bar.get_x() + bar.get_width() / 2.,\n bar.get_height() + axes[i].get_ylim()[1] / 30,\n positions[bar_index] + 1,\n ha='center',\n va='bottom', size=13)\n\n if fig_location:\n touch(fig_location)\n plt.savefig(fig_location)\n if show_figure:\n fig.tight_layout()\n plt.show()\n\n # end = time.perf_counter()\n # gsl.profile(f\"Time: {end - start:0.4f} s\")\n\n\nif __name__ == '__main__':\n # Start measuring time\n start = time.perf_counter()\n\n # Parse args and get values\n args = parser.parse_args()\n fig_loc = args.fig_location\n show_fig = args.show_figure\n regs = args.regions\n\n # Check if the input is valid\n if not regs:\n regs = list(REGIONS_NUMS.keys())\n if not set(regs).issubset(set(REGIONS_NUMS.keys())):\n # gsl.log(logging.ERROR, f\"Passed region does not exist!\\nYou have passed {regs} \"\n # f\"while only these: {list(REGIONS_NUMS.keys())} are supported.\")\n print(f\"Passed region does not exist!\\nYou have passed {regs} \"\n f\"while only these: {list(REGIONS_NUMS.keys())} are supported.\")\n exit(-1)\n _, data = DataDownloader().get_list(regs)\n plot_stat(data_source=data, fig_location=fig_loc, show_figure=show_fig, regions=regs)\n # End measuring time\n end = time.perf_counter()\n # gsl.profile(f\"Time: {end - start:0.4f} s\")\n", "repo_name": "RichardKlem/IZV", "sub_path": "get_stat.py", "file_name": "get_stat.py", "file_ext": "py", "file_size_in_byte": 4646, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "os.utime", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 109, "usage_type": "call"}, {"api_name": "download.REGIONS_NUMS.keys", "line_number": 119, "usage_type": "call"}, {"api_name": 
"download.REGIONS_NUMS", "line_number": 119, "usage_type": "name"}, {"api_name": "download.REGIONS_NUMS.keys", "line_number": 120, "usage_type": "call"}, {"api_name": "download.REGIONS_NUMS", "line_number": 120, "usage_type": "name"}, {"api_name": "download.REGIONS_NUMS.keys", "line_number": 124, "usage_type": "call"}, {"api_name": "download.REGIONS_NUMS", "line_number": 124, "usage_type": "name"}, {"api_name": "download.DataDownloader", "line_number": 126, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "26871703651", "text": "from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom builtins import super\nfrom builtins import str\nfrom builtins import int\nfrom builtins import range\nfrom builtins import dict\nfrom builtins import zip\nfrom future import standard_library\nstandard_library.install_aliases()\n\ntry:\n import __builtin__ as builtins\nexcept ImportError:\n import builtins\n\nfrom copy import copy\n\ntry:\n from PySpice.Unit.Unit import UnitValue\nexcept ImportError:\n # PySpice is not supported in Python 2, so need to make a dummy class\n # to replicate a class from PySpice.\n class UnitValue:\n pass\n\nfrom .defines import *\nfrom .utilities import *\n\n\nclass Part(object):\n \"\"\"\n A class for storing a definition of a schematic part.\n\n Attributes:\n ref: String storing the reference of a part within a schematic (e.g., 'R5').\n value: String storing the part value (e.g., '3K3').\n footprint: String storing the PCB footprint associated with a part (e.g., SOIC-8).\n pins: List of Pin objects for this part.\n\n Args:\n lib: Either a SchLib object or a schematic part library file name.\n name: A string with name of the part to find in the library, or to assign to\n the part defined by the part definition.\n dest: String that indicates where the part is destined for (e.g., LIBRARY).\n tool: The format for the library file or part definition (e.g., KICAD).\n connections: A dictionary with part pin names/numbers as keys and the\n names of nets to which they will be connected as values. For example:\n { 'IN-':'a_in', 'IN+':'GND', '1':'AMPED_OUTPUT', '14':'VCC', '7':'GND' }\n part_defn: A list of strings that define the part (usually read from a\n schematic library file).\n circuit: The Circuit object this Part belongs to.\n\n Keyword Args:\n attribs: Name/value pairs for setting attributes for the part.\n For example, manf_num='LM4808MP-8' would create an attribute\n named 'manf_num' for the part and assign it the value 'LM4808MP-8'.\n\n Raises:\n * Exception if the part library and definition are both missing.\n * Exception if an unknown file format is requested.\n \"\"\"\n\n def __init__(self,\n lib=None,\n name=None,\n dest=NETLIST,\n tool=None,\n connections=None,\n part_defn=None,\n circuit=None,\n **attribs):\n\n import skidl\n from .SchLib import SchLib\n from .defines import TEMPLATE, NETLIST, LIBRARY, SKIDL\n\n if tool is None:\n tool = skidl.get_default_tool()\n\n # Setup some part attributes that might be overwritten later on.\n self.do_erc = True # Allow part to be included in ERC.\n self.unit = {\n } # Dictionary for storing subunits of the part, if desired.\n self.pins = [] # Start with no pins, but a place to store them.\n self.name = name # Assign initial part name. 
(Must come after circuit is assigned.)\n self.description = '' # Make sure there is a description, even if empty.\n self._ref = '' # Provide a member for holding a reference.\n self.ref_prefix = '' # Provide a member for holding the part reference prefix.\n self.tool = tool # Initial type of part (SKIDL, KICAD, etc.)\n self.circuit = None # Part starts off unassociated with any circuit.\n\n # Create a Part from a library entry.\n if lib:\n # If the lib argument is a string, then create a library using the\n # string as the library file name.\n if isinstance(lib, basestring):\n try:\n libname = lib\n lib = SchLib(filename=libname, tool=tool)\n except Exception as e:\n if skidl.QUERY_BACKUP_LIB:\n logger.warning(\n 'Could not load KiCad schematic library \"{}\", falling back to backup library.'\n .format(libname))\n lib = skidl.load_backup_lib()\n if not lib:\n raise e\n else:\n raise e\n\n # Make a copy of the part from the library but don't add it to the netlist.\n part = lib[name].copy(dest=TEMPLATE)\n\n # Overwrite self with the new part.\n self.__dict__.update(part.__dict__)\n\n # Make sure all the pins have a valid reference to this part.\n self.associate_pins()\n\n # Store the library name of this part.\n self.lib = getattr(lib, 'filename', None)\n\n # Otherwise, create a Part from a part definition. If the part is\n # destined for a library, then just get its name. If it's going into\n # a netlist, then parse the entire part definition.\n elif part_defn:\n self.part_defn = part_defn\n self.parse(just_get_name=(dest != NETLIST))\n\n # If the part is destined for a SKiDL library, then it will be defined\n # by the additional attribute values that are passed.\n elif tool == SKIDL and name:\n pass\n\n else:\n logger.error(\n \"Can't make a part without a library & part name or a part definition.\"\n )\n raise Exception\n\n if 'ref' in attribs:\n self._ref = attribs['ref']\n\n # If the part is going to be an element in a circuit, then add it to the\n # the circuit and make any indicated pin/net connections.\n if dest != LIBRARY:\n if dest == NETLIST:\n # If no Circuit object is given, then use the default Circuit that always exists.\n # Always set circuit first because naming the part requires a lookup\n # of existing names in the circuit.\n if not circuit:\n circuit = default_circuit # pylint: disable=undefined-variable\n circuit += self\n elif dest == TEMPLATE:\n # If this is just a part template, don't add the part to the circuit.\n # Just place the reference to the Circuit object in the template.\n if not circuit:\n self.circuit = default_circuit # pylint: disable=undefined-variable\n self.circuit = circuit\n\n # Add any net/pin connections to this part that were passed as arguments.\n if isinstance(connections, dict):\n for pin, net in connections.items():\n net += self[pin]\n\n # Add any other passed-in attributes to the part.\n for k, v in attribs.items():\n setattr(self, k, v)\n\n def _find_min_max_pins(self):\n \"\"\" Return the minimum and maximum pin numbers for the part. \"\"\"\n pin_nums = []\n try:\n for p in self.pins:\n try:\n pin_nums.append(int(p.num))\n except ValueError:\n pass\n except AttributeError:\n # This happens if the part has no pins.\n pass\n try:\n return min(pin_nums), max(pin_nums)\n except ValueError:\n # This happens if the part has no integer-labeled pins.\n return 0, 0\n\n def parse(self, just_get_name=False):\n \"\"\"\n Create a part from its stored part definition.\n\n Args:\n just_get_name: When true, just get the name and aliases for the\n part. 
Leave the rest unparsed.\n \"\"\"\n\n try:\n parse_func = getattr(self, '_parse_lib_part_{}'.format(self.tool))\n parse_func(just_get_name)\n except AttributeError:\n logger.error(\n \"Can't create a part with an unknown ECAD tool file format: {}.\".\n format(self.tool))\n raise Exception\n\n def associate_pins(self):\n \"\"\"\n Make sure all the pins in a part have valid references to the part.\n \"\"\"\n for p in self.pins:\n p.part = self\n\n def copy(self, num_copies=None, dest=NETLIST, circuit=None, **attribs):\n \"\"\"\n Make zero or more copies of this part while maintaining all pin/net\n connections.\n\n Args:\n num_copies: Number of copies to make of this part.\n dest: Indicates where the copy is destined for (e.g., NETLIST).\n\n Keyword Args:\n attribs: Name/value pairs for setting attributes for the copy.\n\n Returns:\n A list of Part copies or a single Part if num_copies==1.\n\n Raises:\n Exception if the requested number of copies is a non-integer or negative.\n\n Notes:\n An instance of a part can be copied just by calling it like so::\n\n res = Part('device','R') # Get a resistor.\n res_copy = res(value='1K') # Copy the resistor and set resistance value.\n\n You can also use the multiplication operator to make copies::\n\n cap = Part('device', 'C') # Get a capacitor\n caps = 10 * cap # Make an array with 10 copies of it.\n \"\"\"\n\n from .defines import NETLIST\n from .Circuit import Circuit\n from .Pin import Pin\n\n # If the number of copies is None, then a single copy will be made\n # and returned as a scalar (not a list). Otherwise, the number of\n # copies will be set by the num_copies parameter or the number of\n # values supplied for each part attribute.\n num_copies_attribs = find_num_copies(**attribs)\n return_list = (num_copies is not None) or (num_copies_attribs > 1)\n if num_copies is None:\n num_copies = max(1, num_copies_attribs)\n\n # Check that a valid number of copies is requested.\n if not isinstance(num_copies, int):\n logger.error(\n \"Can't make a non-integer number ({}) of copies of a part!\".\n format(num_copies))\n raise Exception\n if num_copies < 0:\n logger.error(\n \"Can't make a negative number ({}) of copies of a part!\".\n format(num_copies))\n raise Exception\n\n # Now make copies of the part one-by-one.\n copies = []\n for i in range(num_copies):\n\n # Make a shallow copy of the part.\n cpy = copy(self)\n\n # Remove any existing Pin and PartUnit attributes so new ones\n # can be made in the copy without generating warning messages.\n rmv_attrs = [k for k,v in cpy.__dict__.items() if isinstance(v, (Pin, PartUnit))]\n for attr in rmv_attrs:\n delattr(cpy, attr)\n\n # The shallow copy will just put references to the pins of the\n # original into the copy, so create independent copies of the pins.\n cpy.pins = []\n cpy += [p.copy() for p in self.pins] # Add pin and its attribute.\n\n # Make sure all the pins have a reference to this new part copy.\n cpy.associate_pins()\n\n # Make copies of the units in the new part copy.\n for label in self.unit:\n # Get the pin numbers from the unit in the original.\n unit = self.unit[label]\n pin_nums = [p.num for p in unit.pins]\n\n # Make a unit in the part copy with the same pin numbers.\n cpy.make_unit(label, *pin_nums)\n\n # Clear the part reference of the copied part so a unique reference\n # can be assigned when the part is added to the circuit.\n # (This is not strictly necessary since the part reference will be\n # adjusted to be unique if needed during the addition process.)\n cpy._ref = None\n\n # 
Copied part starts off not being in any circuit.\n cpy.circuit = None\n\n # If copy is destined for a netlist, then add it to the Circuit its\n # source came from or else add it to the default Circuit object.\n if dest == NETLIST:\n # Place the copied part in the explicitly-stated circuit,\n # or else into the same circuit as the source part,\n # or else into the default circuit.\n if circuit:\n circuit += cpy\n elif isinstance(self.circuit, Circuit):\n self.circuit += cpy\n else:\n builtins.default_circuit += cpy\n\n # Enter any new attributes.\n for k, v in attribs.items():\n if isinstance(v, (list, tuple)):\n try:\n v = v[i]\n except IndexError:\n logger.error(\n \"{} copies of part {} were requested, but too few elements in attribute {}!\".\n format(num_copies, self.name, k))\n raise Exception\n setattr(cpy, k, v)\n\n # Add the part copy to the list of copies.\n copies.append(cpy)\n\n # Return a list of the copies made or just a single copy.\n if return_list:\n return copies\n return copies[0]\n\n # Make copies with the multiplication operator or by calling the object.\n __call__ = copy\n\n def __mul__(self, num_copies):\n if num_copies is None:\n num_copies = 0\n return self.copy(num_copies=num_copies)\n\n __rmul__ = __mul__\n\n def add_pins(self, *pins):\n \"\"\"Add one or more pins to a part.\"\"\"\n for pin in flatten(pins):\n pin.part = self\n self.pins.append(pin)\n # Create attributes so pin can be accessed by name or number such\n # as part.ENBL or part.p5.\n add_unique_attr(self, pin.name, pin)\n add_unique_attr(self, 'p'+str(pin.num), pin)\n return self\n\n __iadd__ = add_pins\n\n def get_pins(self, *pin_ids, **criteria):\n \"\"\"\n Return list of part pins selected by pin numbers or names.\n\n Args:\n pin_ids: A list of strings containing pin names, numbers,\n regular expressions, slices, lists or tuples. If empty,\n then it will select all pins.\n\n Keyword Args:\n criteria: Key/value pairs that specify attribute values the\n pins must have in order to be selected.\n\n Returns:\n A list of pins matching the given IDs and satisfying all the criteria,\n or just a single Pin object if only a single match was found.\n Or None if no match was found.\n\n Notes:\n Pins can be selected from a part by using brackets like so::\n\n atmega = Part('atmel', 'ATMEGA16U2')\n net = Net()\n atmega[1] += net # Connects pin 1 of chip to the net.\n net += atmega['RESET'] # Connects reset pin to the net.\n \"\"\"\n\n from .NetPinList import NetPinList\n from .Alias import Alias\n\n # If no pin identifiers were given, then use a wildcard that will\n # select all pins.\n if not pin_ids:\n pin_ids = ['.*']\n\n # Determine the minimum and maximum pin ids if they don't already exist.\n if 'min_pin' not in dir(self) or 'max_pin' not in dir(self):\n self.min_pin, self.max_pin = self._find_min_max_pins()\n\n # Go through the list of pin IDs one-by-one.\n pins = NetPinList()\n for p_id in expand_indices(self.min_pin, self.max_pin, *pin_ids):\n\n # Does pin ID (either integer or string) match a pin number...\n tmp_pins = filter_list(self.pins, num=str(p_id), do_str_match=True, **criteria)\n if tmp_pins:\n pins.extend(tmp_pins)\n continue\n\n # OK, assume it's not a pin number but a pin name. 
Look for an\n # exact match.\n tmp_pins = filter_list(self.pins, name=p_id, do_str_match=True, **criteria)\n if tmp_pins:\n pins.extend(tmp_pins)\n continue\n\n # OK, now check pin aliases for an exact match.\n tmp_pins = filter_list(self.pins, alias=p_id, do_str_match=True, **criteria)\n if tmp_pins:\n pins.extend(tmp_pins)\n continue\n\n # OK, pin ID is not a pin number and doesn't exactly match a pin\n # name or alias. Does it match a substring within a pin name?\n p_id_re = ''.join(['.*', p_id, '.*'])\n tmp_pins = filter_list(self.pins, name=p_id_re, **criteria)\n if tmp_pins:\n pins.extend(tmp_pins)\n continue\n\n # Pin ID didn't match a substring in the pin names, so now check\n # the pin aliases.\n p_id_re_alias = Alias(p_id_re, id(self))\n tmp_pins = filter_list(self.pins, alias=p_id_re_alias, **criteria)\n if tmp_pins:\n pins.extend(tmp_pins)\n continue\n\n return list_or_scalar(pins)\n\n # Get pins from a part using brackets, e.g. [1,5:9,'A[0-9]+'].\n __getitem__ = get_pins\n\n def __setitem__(self, ids, *pins_nets_buses):\n \"\"\"\n You can't assign to the pins of parts. You must use the += operator.\n\n This method is a work-around that allows the use of the += for making\n connections to pins while prohibiting direct assignment. Python\n processes something like my_part['GND'] += gnd as follows::\n\n 1. Part.__getitem__ is called with 'GND' as the index. This\n returns a single Pin or a NetPinList.\n 2. The Pin.__iadd__ or NetPinList.__iadd__ method is passed\n the thing to connect to the pin (gnd in this case). This method\n makes the actual connection to the part pin or pins. Then it\n creates an iadd_flag attribute in the object it returns.\n 3. Finally, Part.__setitem__ is called. If the iadd_flag attribute\n is true in the passed argument, then __setitem__ was entered\n as part of processing the += operator. If there is no\n iadd_flag attribute, then __setitem__ was entered as a result\n of using a direct assignment, which is not allowed.\n \"\"\"\n\n # If the iadd_flag is set, then it's OK that we got\n # here and don't issue an error. Also, delete the flag.\n if getattr(pins_nets_buses[0], 'iadd_flag', False):\n del pins_nets_buses[0].iadd_flag\n return\n\n # No iadd_flag or it wasn't set. This means a direct assignment\n # was made to the pin, which is not allowed.\n logger.error(\"Can't assign to a part! Use the += operator.\")\n raise Exception\n\n def is_connected(self):\n \"\"\"\n Return T/F depending upon whether a part is connected in a netlist.\n\n If a part has pins but none of them are connected to nets, then\n this method will return False. Otherwise, it will return True even if\n the part has no pins (which can be the case for mechanical parts,\n silkscreen logos, or other non-electrical schematic elements).\n \"\"\"\n\n # Assume parts without pins (like mech. 
holes) are always connected.\n if len(self.pins) == 0:\n return True\n\n # If any pin is found to be connected to a net, return True.\n for p in self.pins:\n if p.is_connected():\n return True\n\n # No net connections found, so return False.\n return False\n\n def is_movable(self):\n \"\"\"\n Return T/F if the part can be moved from one circuit into another.\n\n This method returns true if:\n 1) the part is not in a circuit, or\n 2) the part has pins but none of them are connected to nets, or\n 3) the part has no pins (which can be the case for mechanical parts,\n silkscreen logos, or other non-electrical schematic elements).\n \"\"\"\n from .Circuit import Circuit\n\n return not isinstance(\n self.circuit,\n Circuit) or not self.is_connected() or not self.pins\n\n def set_pin_alias(self, alias, *pin_ids, **criteria):\n \"\"\"\n Set the alias for a part pin.\n\n Args:\n alias: The alias for the pin.\n pin_ids: A list of strings containing pin names, numbers,\n regular expressions, slices, lists or tuples.\n\n Keyword Args:\n criteria: Key/value pairs that specify attribute values the\n pin must have in order to be selected.\n\n Returns:\n Nothing.\n \"\"\"\n\n from .Alias import Alias\n\n pin = self.get_pins(*pin_ids, **criteria)\n if pin is None:\n logger.error('Cannot set alias for non-existent pin.')\n raise Exception\n elif isinstance(pin, list):\n logger.error('Cannot use the same alias for multiple pins.')\n raise Exception\n else:\n setattr(pin, 'alias', Alias(alias, id(pin)))\n add_unique_attr(self, alias, pin)\n\n def make_unit(self, label, *pin_ids, **criteria):\n \"\"\"\n Create a PartUnit from a set of pins in a Part object.\n\n Parts can be organized into smaller pieces called PartUnits. A PartUnit\n acts like a Part but contains only a subset of the pins of the Part.\n\n Args:\n label: The label used to identify the PartUnit.\n pin_ids: A list of strings containing pin names, numbers,\n regular expressions, slices, lists or tuples.\n\n Keyword Args:\n criteria: Key/value pairs that specify attribute values the\n pin must have in order to be selected.\n\n Returns:\n The PartUnit.\n \"\"\"\n\n # Warn if the unit label collides with any of the part's pin names.\n collisions = self.get_pins(label)\n if collisions:\n logger.warning(\n \"Using a label ({}) for a unit of {} that matches one or more of its pin names ({})!\".\n format(label, self.erc_desc(), collisions))\n\n # Create the part unit.\n self.unit[label] = PartUnit(self, *pin_ids, **criteria)\n add_unique_attr(self, label, self.unit[label])\n return self.unit[label]\n\n def _get_fields(self):\n \"\"\"\n Return a list of component field names.\n \"\"\"\n\n from .Pin import Pin\n\n # Get all the component attributes and subtract all the ones that\n # should not appear under \"fields\" in the netlist or XML.\n # Also, skip all the Pin and PartUnit attributes.\n fields = set([k for k,v in self.__dict__.items() if not isinstance(v, (Pin,PartUnit))])\n non_fields = set([\n 'name', 'min_pin', 'max_pin', 'hierarchy', '_value', '_ref',\n 'ref_prefix', 'unit', 'num_units', 'part_defn', 'definition',\n 'fields', 'draw', 'lib', 'fplist', 'do_erc', 'aliases', 'tool',\n 'pins', 'footprint', 'circuit', 'skidl_trace'\n ])\n return list(fields - non_fields)\n\n def generate_netlist_component(self, tool=None):\n \"\"\"\n Generate the part information for inclusion in a netlist.\n\n Args:\n tool: The format for the netlist file (e.g., KICAD).\n \"\"\"\n\n import skidl\n\n if tool is None:\n tool = skidl.get_default_tool()\n\n try:\n 
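# Look up the tool-specific generator method by name\n # ('_gen_netlist_comp_' + tool); an unsupported tool falls\n # through to the AttributeError handler below.\n 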
gen_func = getattr(self, '_gen_netlist_comp_{}'.format(tool))\n return gen_func()\n except AttributeError:\n logger.error(\n \"Can't generate netlist in an unknown ECAD tool format ({}).\".\n format(tool))\n raise Exception\n\n def generate_xml_component(self, tool=None):\n \"\"\"\n Generate the part information for inclusion in an XML file.\n\n Args:\n tool: The format for the XML file (e.g., KICAD).\n \"\"\"\n\n import skidl\n\n if tool is None:\n tool = skidl.get_default_tool()\n\n try:\n gen_func = getattr(self, '_gen_xml_comp_{}'.format(tool))\n return gen_func()\n except AttributeError:\n logger.error(\n \"Can't generate XML in an unknown ECAD tool format ({}).\".\n format(tool))\n raise Exception\n\n def erc(self):\n \"\"\"\n Do electrical rules check on a part in the schematic.\n \"\"\"\n\n from .Pin import Pin\n\n # Don't check this part if the flag is not true.\n if not self.do_erc:\n return\n\n # Check each pin of the part.\n for p in self.pins:\n\n # Skip this pin if the flag is false.\n if not p.do_erc:\n continue\n\n # Error if a pin is unconnected but not of type NOCONNECT.\n if p.net is None:\n if p.func != Pin.NOCONNECT:\n erc_logger.warning(\n 'Unconnected pin: {p}.'.format(p=p.erc_desc()))\n\n # Error if a no-connect pin is connected to a net.\n elif p.net.drive != Pin.NOCONNECT_DRIVE:\n if p.func == Pin.NOCONNECT:\n erc_logger.warning(\n 'Incorrectly connected pin: {p} should not be connected to a net ({n}).'.\n format(p=p.erc_desc(), n=p.net.name))\n\n def erc_desc(self):\n \"\"\"Create description of part for ERC and other error reporting.\"\"\"\n return \"{p.name}/{p.ref}\".format(p=self)\n\n def __str__(self):\n \"\"\"Return a description of the pins on this part as a string.\"\"\"\n return '\\n {name} ({aliases}): {desc}\\n {pins}'.format(\n name=self.name, \n aliases=', '.join(getattr(self, 'aliases','')), \n desc=self.description, \n pins='\\n '.join([p.__str__() for p in self.pins])\n )\n\n __repr__ = __str__\n\n def export(self):\n \"\"\"Return a string to recreate a Part object.\"\"\"\n keys = self._get_fields()\n keys.extend(('ref_prefix', 'num_units', 'fplist', 'do_erc', 'aliases',\n 'pin', 'footprint'))\n attribs = []\n attribs.append('{}={}'.format('name', repr(self.name)))\n attribs.append('dest=TEMPLATE')\n attribs.append('tool=SKIDL')\n for k in keys:\n v = getattr(self, k, None)\n if v:\n attribs.append('{}={}'.format(k, repr(v)))\n if self.pins:\n pin_strs = [p.export() for p in self.pins]\n attribs.append('pins=[{}]'.format(','.join(pin_strs)))\n\n # Return the string after removing all the non-ascii stuff (like ohm symbols).\n return 'Part({})'.format(','.join(attribs)).encode('ascii', 'ignore').decode('utf-8')\n\n @property\n def ref(self):\n \"\"\"\n Get, set and delete the part reference.\n\n When setting the part reference, if another part with the same\n reference is found, the reference for this part is adjusted to make\n it unique.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, r):\n # Remove the existing reference so it doesn't cause a collision if the\n # object is renamed with its existing name.\n self._ref = None\n\n # Now name the object with the given reference or some variation\n # of it that doesn't collide with anything else in the list.\n self._ref = ensure_unique_name(self.circuit.parts, 'ref', self.ref_prefix,\n r)\n return\n\n @ref.deleter\n def ref(self):\n \"\"\"Delete the part reference.\"\"\"\n self._ref = None\n\n @property\n def value(self):\n \"\"\"Get, set and delete the part value.\"\"\"\n try:\n if 
isinstance(self._value, UnitValue):\n return self._value\n else:\n return str(self._value)\n except AttributeError:\n # If part has no value, return its part name as the value. This is\n # done in KiCad where a resistor value is set to 'R' if no\n # explicit value was set.\n return self.name\n\n @value.setter\n def value(self, value):\n \"\"\"Set the part value.\"\"\"\n self._value = value\n\n @value.deleter\n def value(self):\n \"\"\"Delete the part value.\"\"\"\n del self._value\n\n @property\n def foot(self):\n \"\"\"Get, set and delete the part footprint.\"\"\"\n return self._foot\n\n @foot.setter\n def foot(self, footprint):\n \"\"\"Set the part footprint.\"\"\"\n self._foot = str(footprint)\n\n @foot.deleter\n def foot(self):\n \"\"\"Delete the part footprint.\"\"\"\n del self._foot\n\n def __bool__(self):\n \"\"\"Any valid Part is True\"\"\"\n return True\n __nonzero__ = __bool__ # Python 2 compatibility.\n\n\n##############################################################################\n\n\nclass SkidlPart(Part):\n \"\"\"\n A class for storing a SKiDL definition of a schematic part. It's identical\n to its Part superclass except:\n *) The tool defaults to SKIDL.\n *) The destination defaults to TEMPLATE so that it's easier to start\n a part and then add pins to it without it being added to the netlist.\n \"\"\"\n\n from .defines import SKIDL, TEMPLATE\n\n def __init__(self,\n lib=None,\n name=None,\n dest=TEMPLATE,\n tool=SKIDL,\n connections=None,\n **attribs):\n super(SkidlPart, self).__init__(lib, name, dest, tool, connections,\n **attribs)\n\n\n##############################################################################\n\n\nclass PartUnit(Part):\n \"\"\"\n Create a PartUnit from a set of pins in a Part object.\n\n Parts can be organized into smaller pieces called PartUnits. 
A PartUnit\n acts like a Part but contains only a subset of the pins of the Part.\n\n Args:\n part: This is the parent Part whose pins the PartUnit is built from.\n pin_ids: A list of strings containing pin names, numbers,\n regular expressions, slices, lists or tuples.\n\n Keyword Args:\n criteria: Key/value pairs that specify attribute values the\n pin must have in order to be selected.\n\n Examples:\n This will return unit 1 from a part::\n\n lm358 = Part('linear','lm358')\n lm358a = PartUnit(lm358, unit=1)\n\n Or you can specify the pins directly::\n\n lm358a = PartUnit(lm358, 1, 2, 3)\n \"\"\"\n\n def __init__(self, part, *pin_ids, **criteria):\n # Remember the part that this unit belongs to.\n self.parent = part\n\n # Give the PartUnit the same information as the Part it is generated\n # from so it can act the same way, just with fewer pins.\n for k, v in part.__dict__.items():\n self.__dict__[k] = v\n\n # Don't associate any units from the parent with this unit itself.\n self.unit = {}\n\n # Remove the pins copied from the parent and replace them with\n # pins selected from the parent.\n self.pins = []\n self.add_pins_from_parent(*pin_ids, **criteria)\n\n def add_pins_from_parent(self, *pin_ids, **criteria):\n \"\"\"\n Add selected pins from the parent to the part unit.\n \"\"\"\n\n # Get new pins selected from the parent.\n new_pins = to_list(self.parent.get_pins(*pin_ids, **criteria))\n\n # Add attributes for accessing the new pins.\n for pin in new_pins:\n add_unique_attr(self, 'p'+str(pin.num), pin)\n add_unique_attr(self, pin.name, pin)\n\n # Add new pins to existing pins of the unit, removing duplicates.\n self.pins = list(set(self.pins + new_pins))\n", "repo_name": "mythagel/icebit", "sub_path": "pcb/skidl/Part.py", "file_name": "Part.py", "file_ext": "py", "file_size_in_byte": 32095, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "future.standard_library.install_aliases", "line_number": 13, "usage_type": "call"}, {"api_name": "future.standard_library", "line_number": 13, "usage_type": "name"}, {"api_name": "skidl.get_default_tool", "line_number": 82, "usage_type": "call"}, {"api_name": "SchLib.SchLib", "line_number": 103, "usage_type": "call"}, {"api_name": "skidl.QUERY_BACKUP_LIB", "line_number": 105, "usage_type": "attribute"}, {"api_name": "skidl.load_backup_lib", "line_number": 109, "usage_type": "call"}, {"api_name": "defines.TEMPLATE", "line_number": 116, "usage_type": "name"}, {"api_name": "defines.NETLIST", "line_number": 132, "usage_type": "name"}, {"api_name": "defines.SKIDL", "line_number": 136, "usage_type": "name"}, {"api_name": "defines.LIBRARY", "line_number": 150, "usage_type": "name"}, {"api_name": "defines.NETLIST", "line_number": 151, "usage_type": "name"}, {"api_name": "defines.TEMPLATE", "line_number": 158, "usage_type": "name"}, {"api_name": "builtins.dict", "line_number": 166, "usage_type": "argument"}, {"api_name": "builtins.int", "line_number": 180, "usage_type": "call"}, {"api_name": "defines.NETLIST", "line_number": 217, "usage_type": "name"}, {"api_name": "builtins.int", "line_number": 261, "usage_type": "argument"}, {"api_name": "builtins.range", "line_number": 274, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 277, "usage_type": "call"}, {"api_name": "Pin.Pin", "line_number": 281, "usage_type": "name"}, {"api_name": "defines.NETLIST", "line_number": 313, "usage_type": "name"}, {"api_name": "Circuit.Circuit", "line_number": 319, "usage_type": 
"name"}, {"api_name": "builtins.default_circuit", "line_number": 322, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 345, "usage_type": "name"}, {"api_name": "builtins.str", "line_number": 362, "usage_type": "call"}, {"api_name": "NetPinList.NetPinList", "line_number": 407, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 411, "usage_type": "call"}, {"api_name": "Alias.Alias", "line_number": 439, "usage_type": "call"}, {"api_name": "Circuit.Circuit", "line_number": 518, "usage_type": "name"}, {"api_name": "Alias.Alias", "line_number": 547, "usage_type": "call"}, {"api_name": "Pin.Pin", "line_number": 592, "usage_type": "name"}, {"api_name": "skidl.get_default_tool", "line_number": 612, "usage_type": "call"}, {"api_name": "skidl.get_default_tool", "line_number": 634, "usage_type": "call"}, {"api_name": "Pin.Pin.NOCONNECT", "line_number": 665, "usage_type": "attribute"}, {"api_name": "Pin.Pin", "line_number": 665, "usage_type": "name"}, {"api_name": "Pin.Pin.NOCONNECT_DRIVE", "line_number": 670, "usage_type": "attribute"}, {"api_name": "Pin.Pin", "line_number": 670, "usage_type": "name"}, {"api_name": "Pin.Pin.NOCONNECT", "line_number": 671, "usage_type": "attribute"}, {"api_name": "Pin.Pin", "line_number": 671, "usage_type": "name"}, {"api_name": "builtins.str", "line_number": 746, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 771, "usage_type": "call"}, {"api_name": "defines.TEMPLATE", "line_number": 801, "usage_type": "name"}, {"api_name": "defines.SKIDL", "line_number": 802, "usage_type": "name"}, {"api_name": "builtins.super", "line_number": 805, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 866, "usage_type": "call"}]} +{"seq_id": "11710855578", "text": "from typing import Any\n\nimport pelican\nfrom pelican import signals\n\nfrom . 
import PLUMAGE_ROOT\nfrom .dom_transforms import transform\nfrom .favicon import add_favicon_assets\nfrom .webassets import setup_webassets\n\nALL_CODE_STYLES: set[str] = {\n f.stem for f in PLUMAGE_ROOT.joinpath(\"static/css/pygments/\").resolve().iterdir()\n}\n\n\ndef register_signals() -> None:\n signals.initialized.connect(check_config)\n signals.static_generator_finalized.connect(add_favicon_assets)\n signals.content_written.connect(transform)\n\n\ndef check_config(sender) -> None:\n \"\"\"Validate and set up the Plumage configuration.\"\"\"\n conf = sender.settings\n\n # Keep some metadata around.\n conf[\"PELICAN_VERSION\"] = pelican.__version__\n\n # Check the code color scheme ID.\n conf = check_codestyle(conf)\n\n # Set up the webassets plugin.\n conf = setup_webassets(conf)\n\n sender.settings = conf\n\n\ndef check_codestyle(conf: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Check selected code color scheme ID is recognized by Pygments.\n\n Defaults to \"monokai\" if none set in Pelican's conf.\n \"\"\"\n # Default the code style to Monokai.\n if not conf.get(\"CODE_STYLE\"):\n conf[\"CODE_STYLE\"] = \"monokai\"\n\n code_style = conf[\"CODE_STYLE\"]\n if code_style not in ALL_CODE_STYLES:\n msg = f\"{code_style} not recognized among {sorted(ALL_CODE_STYLES)}.\"\n raise ValueError(\n msg,\n )\n\n return conf\n", "repo_name": "kdeldycke/plumage", "sub_path": "plumage/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1423, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pelican.signals.initialized.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "pelican.signals.initialized", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pelican.signals", "line_number": 17, "usage_type": "name"}, {"api_name": "pelican.signals.static_generator_finalized.connect", "line_number": 18, "usage_type": "call"}, {"api_name": "favicon.add_favicon_assets", "line_number": 18, "usage_type": "argument"}, {"api_name": "pelican.signals.static_generator_finalized", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pelican.signals", "line_number": 18, "usage_type": "name"}, {"api_name": "pelican.signals.content_written.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "dom_transforms.transform", "line_number": 19, "usage_type": "argument"}, {"api_name": "pelican.signals.content_written", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pelican.signals", "line_number": 19, "usage_type": "name"}, {"api_name": "pelican.__version__", "line_number": 27, "usage_type": "attribute"}, {"api_name": "webassets.setup_webassets", "line_number": 33, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "20598720568", "text": "import asyncio\nfrom random import random\n\n\nasync def sleep_task():\n await asyncio.sleep(2)\n return \"Task completed\"\n\n\nasync def estimate_pi(number_of_points: int = 1000000) -> float:\n points = [(random(), random()) for _ in range(number_of_points)]\n number_of_points_in_circle = sum([1 for x, y in points if (x**2 + y**2) < 1])\n return (number_of_points_in_circle / number_of_points) * 4\n\n\n# Function that returns a Future\ndef create_future_task():\n loop = asyncio.get_event_loop()\n future = loop.create_future()\n\n async def calc_pi() -> None:\n result = await estimate_pi()\n future.set_result(result)\n\n asyncio.ensure_future(calc_pi())\n return future\n\n\n
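# Flow, as read from this file: main() first awaits a regular coroutine,\n# then awaits the Future returned above, which resolves once calc_pi()\n# calls future.set_result() on the same event loop.\n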
async def main() -> None:\n future = create_future_task()\n print(\"Pi estimation in progress ...\")\n\n coroutine_result = await sleep_task()\n print(\"Coroutine Result:\", coroutine_result)\n\n future_result = await future\n print(\"Pi estimation:\", future_result)\n\n\nasyncio.run(main())\n", "repo_name": "orsha2/tutorials", "sub_path": "01-asyncio/lesson4/sol2.py", "file_name": "sol2.py", "file_ext": "py", "file_size_in_byte": 991, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "asyncio.sleep", "line_number": 6, "usage_type": "call"}, {"api_name": "random.random", "line_number": 11, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 18, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 25, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "44163007640", "text": "from telegraph import Telegraph\n\ntele_ = Telegraph()\n\ndef telegrapher(a_title: str, content: str) -> str:\n auth_name = tele_.create_account(short_name=\"@Mister_Dark_Prince\")\n resp = tele_.create_page(\n title=a_title,\n author_name=auth_name,\n author_url=\"https://t.me/TGFRNDZ\",\n html_content=content,\n )\n link_ = resp[\"url\"]\n return link_\n\n\ndef int_list(list_):\n intlist = []\n for one in list_:\n if one.isdigit():\n one = int(one)\n intlist.append(one)\n return intlist\n\n# NOTE: 'bot' is not defined in this module; it is assumed to be an existing\n# client object (e.g. a Pyrogram Client) provided by the importing code.\nasync def username_list(list_):\n u_list = []\n for one in list_:\n u_list.append((await bot.get_users(one)).username)\n return u_list\n", "repo_name": "Mr-Dark-Prince/Vc-Player", "sub_path": "EvaMaria/darkprince/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "94", "api": [{"api_name": "telegraph.Telegraph", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "18194619143", "text": "import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QMainWindow, QMessageBox, QCalendarWidget\r\nfrom director_window import Ui_MainWindow\r\nfrom show_ads import Ui_Form_ads\r\nfrom title_body import Ui_Form_title_body\r\nfrom edit import Ui_Edit_Form\r\nimport sqlite3\r\n\r\ncon = sqlite3.connect('dates.db')\r\ndata_dates = con.cursor()\r\ntry:\r\n data_dates.execute('''CREATE TABLE dates (\r\n date text,\r\n title text,\r\n message text,\r\n star text\r\n )''')\r\nexcept sqlite3.OperationalError:\r\n pass\r\n\r\n\r\n# editing a note\r\nclass Edit_Form(QWidget, Ui_Edit_Form):\r\n def __init__(self, btn, date_id, parent=None):\r\n parent.hide()\r\n super(Edit_Form, self).__init__()\r\n self.setupUi(self)\r\n self.btn = btn\r\n con = sqlite3.connect('dates.db')\r\n cur = con.cursor()\r\n text = cur.execute(f'''SELECT * FROM dates WHERE rowid = {date_id}''').fetchone()\r\n # self.lineEdit.setText(text[1])\r\n self.lineEdit.setText(text[1])\r\n self.textEdit.setText(text[-2])\r\n self.pushButton.clicked.connect(lambda: self.update_entry(date_id))\r\n\r\n # the actual edit\r\n def update_entry(self, date_id):\r\n con = sqlite3.connect('dates.db')\r\n cur = con.cursor()\r\n cur.execute(\r\n f'''UPDATE dates SET title = \"{self.lineEdit.text()}\", message = \"{self.textEdit.toPlainText()}\"\r\n WHERE rowid = {date_id}''')\r\n con.commit()\r\n cur.execute(f'SELECT * FROM dates WHERE rowid = {date_id}')\r\n self.btn.setText(self.lineEdit.text())\r\n self.hide()\r\n\r\n\r\n# showing a note\r\nclass 
Show_title_body(QWidget, Ui_Form_title_body):\r\n def __init__(self, title, date):\r\n super(Show_title_body, self).__init__()\r\n self.setupUi(self)\r\n self.update()\r\n # date base\r\n con = sqlite3.connect('dates.db')\r\n data_dates = con.cursor()\r\n self.btn = title\r\n # used to filter the note by id\r\n self.id_date = \\\r\n data_dates.execute(\r\n f'SELECT rowid FROM dates WHERE title = \"{title.text()}\" AND date = \"{date}\"').fetchone()[0]\r\n data_dates.execute(f'SELECT * FROM dates WHERE title = \"{title.text()}\" AND date = \"{date}\"')\r\n date_note = data_dates.fetchall()\r\n # Display the note\r\n for date, title_, message, boolean in date_note:\r\n if title.text() == title_:\r\n self.label_title.setText(title.text())\r\n self.textBrowser.append(message)\r\n self.message = message\r\n self.pushButton_pen.clicked.connect(self.edit_entry)\r\n\r\n # editing\r\n def edit_entry(self):\r\n self.edit = Edit_Form(self.btn, self.id_date, self)\r\n self.edit.show()\r\n\r\n\r\n# all notes for a day\r\nclass Show_Ads(QWidget, Ui_Form_ads):\r\n def __init__(self, date):\r\n super(Show_Ads, self).__init__()\r\n self.setupUi(self)\r\n self.setup(date)\r\n\r\n def setup(self, date):\r\n\r\n self.label.setText(f'Заметки за {date}')\r\n self.pushButton_clear.clicked.connect(lambda: self.delete_date(date))\r\n # date base\r\n con = sqlite3.connect('dates.db')\r\n data_dates = con.cursor()\r\n # filter by day\r\n data_dates.execute(f'SELECT * FROM dates WHERE date = \"{date}\" ORDER BY star')\r\n for date_, title, message, boolean in reversed(data_dates.fetchall()):\r\n self.btn = QPushButton(title, self)\r\n # put starred (yellow) notes at the top of the list\r\n if boolean == 'True':\r\n self.btn.setStyleSheet(\r\n 'QPushButton{\\n\tbackground-color: yellow;\\n\tborder: 1px solid; padding: 5px 0;\\n}'\r\n 'QPushButton:hover{\\n\tbackground-color: #DAA520}'\r\n )\r\n else:\r\n self.btn.setStyleSheet(\r\n 'QPushButton{\\n\tbackground-color: white;\\n\tborder: 1px solid; padding: 5px 0;\\n}'\r\n 'QPushButton:hover{\\n\tbackground-color: #DCDCDC}'\r\n )\r\n self.btn.clicked.connect(lambda: self.show_ads(self.btn, date))\r\n self.verticalLayout.addWidget(self.btn)\r\n\r\n # show a note\r\n def show_ads(self, title, date):\r\n self.show_title_body = Show_title_body(self.sender(), date)\r\n self.show_title_body.show()\r\n\r\n # delete all notes for a particular day\r\n def delete_date(self, date):\r\n messagebox = QMessageBox.question(self, 'Delete notes?', 'Вы хотите удалить все заметки на эту дату?',\r\n QMessageBox.Yes | QMessageBox.No)\r\n if messagebox == QMessageBox.Yes:\r\n con = sqlite3.connect('dates.db')\r\n data_dates = con.cursor()\r\n data_dates.execute(f'DELETE from dates WHERE date = \"{date}\"')\r\n con.commit()\r\n # clear vertical layout\r\n for i in reversed(range(self.verticalLayout.count())):\r\n self.verticalLayout.itemAt(i).widget().setParent(None)\r\n self.show()\r\n\r\n\r\n# main window\r\nclass Director_Window(QMainWindow, Ui_MainWindow):\r\n def __init__(self):\r\n super(Director_Window, self).__init__()\r\n self.setupUi(self)\r\n self.setWindowTitle('Заметки на день')\r\n self.pushButton.clicked.connect(self.create_ad)\r\n self.pushButton_3.clicked.connect(lambda: self.show_ads(self.calendarWidget.selectedDate().toString()))\r\n self.pushButton_star.clicked.connect(self.change_color_star)\r\n self.boolean = False\r\n\r\n # create a new note\r\n def create_ad(self):\r\n con = sqlite3.connect('dates.db')\r\n data_dates = con.cursor()\r\n 
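# Read the title, body, and selected calendar date from the form.\r\n 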
title = self.lineEdit.text()\r\n body = self.plainTextEdit.toPlainText()\r\n date = self.calendarWidget.selectedDate().toString()\r\n # check that no such note already exists in the database\r\n if not data_dates.execute(\r\n f'SELECT * FROM dates WHERE date = \"{date}\" AND title = \"{title}\"').fetchone() and self.lineEdit.text() \\\r\n and self.plainTextEdit.toPlainText():\r\n data_dates.execute(f\"INSERT INTO dates VALUES ('{date}', '{title}', '{body}', '{self.boolean}')\")\r\n con.commit()\r\n # clear all\r\n self.lineEdit.clear()\r\n self.plainTextEdit.clear()\r\n self.pushButton_star.setStyleSheet('')\r\n self.boolean = False\r\n # change statusbar\r\n self.statusbar.setStyleSheet('background-color: green;')\r\n self.statusbar.showMessage('Заметка создана')\r\n else:\r\n self.statusbar.setStyleSheet('background-color: red;')\r\n self.statusbar.showMessage('Такая заметка уже имеется')\r\n\r\n def show_ads(self, date):\r\n self.ads = Show_Ads(date)\r\n self.ads.show()\r\n\r\n # star the note\r\n def change_color_star(self):\r\n if not self.boolean:\r\n self.pushButton_star.setStyleSheet(\r\n 'QPushButton{\\n\tbackground-color: yellow;\\n\tborder: 1px solid; padding: 0 31px\\n}')\r\n self.boolean = True\r\n else:\r\n self.pushButton_star.setStyleSheet('')\r\n self.boolean = False\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = Director_Window()\r\n ex.show()\r\n data_dates.close()\r\n sys.exit(app.exec())\r\n# ability to edit notes\r\n", "repo_name": "l-arkadiy-l/---Mini-projects---", "sub_path": "yandex_project/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7842, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "94", "api": [{"api_name": "sqlite3.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 23, "usage_type": "name"}, {"api_name": "edit.Ui_Edit_Form", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 51, "usage_type": "name"}, {"api_name": "title_body.Ui_Form_title_body", "line_number": 51, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 81, "usage_type": "name"}, {"api_name": "show_ads.Ui_Form_ads", "line_number": 81, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 97, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 119, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 119, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 120, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 120, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", "line_number": 120, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 121, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 121, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 122, "usage_type": "call"}, {"api_name": 
"PyQt5.QtWidgets.QMainWindow", "line_number": 133, "usage_type": "name"}, {"api_name": "director_window.Ui_MainWindow", "line_number": 133, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 145, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 184, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 184, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "41374760718", "text": "import unittest\nfrom decimal import Decimal\n\nfrom shapely.geometry import Point\n\nfrom app.geocoder import Client\nfrom app.distance_calculator import find_distance_to_polygon, mkad\nfrom config import api_key\n\n\nclass Test(unittest.TestCase):\n def test_find_distance_with_address(self):\n client = Client(api_key)\n coordinates = client.transform_to_coordinates('Можайск')\n self.assertEqual(find_distance_to_polygon(coordinates), 88)\n\n def test_find_distance_with_coordinates(self):\n lat, lon = Decimal(54.719362), Decimal(20.505681)\n self.assertEqual(find_distance_to_polygon((lat, lon)), 1072)\n\n def test_address_inside_mkad(self):\n client = Client(api_key)\n coordinates = client.transform_to_coordinates('Красная площадь')\n self.assertEqual(mkad.contains(Point(coordinates)), True) # True if point is inside MKAD\n\n def test_coordinates_inside_mkad(self):\n lat, lon = Decimal(55.778892), Decimal(37.59542)\n self.assertEqual(mkad.contains(Point((lat, lon))), True) # True if point is inside MKAD\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "repo_name": "ShabUlysses/MKAD", "sub_path": "tests/test_distance.py", "file_name": "test_distance.py", "file_ext": "py", "file_size_in_byte": 1147, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "app.geocoder.Client", "line_number": 13, "usage_type": "call"}, {"api_name": "config.api_key", "line_number": 13, "usage_type": "argument"}, {"api_name": "app.distance_calculator.find_distance_to_polygon", "line_number": 15, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 18, "usage_type": "call"}, {"api_name": "app.distance_calculator.find_distance_to_polygon", "line_number": 19, "usage_type": "call"}, {"api_name": "app.geocoder.Client", "line_number": 22, "usage_type": "call"}, {"api_name": "config.api_key", "line_number": 22, "usage_type": "argument"}, {"api_name": "app.distance_calculator.mkad.contains", "line_number": 24, "usage_type": "call"}, {"api_name": "app.distance_calculator.mkad", "line_number": 24, "usage_type": "name"}, {"api_name": "shapely.geometry.Point", "line_number": 24, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 27, "usage_type": "call"}, {"api_name": "app.distance_calculator.mkad.contains", "line_number": 28, "usage_type": "call"}, {"api_name": "app.distance_calculator.mkad", "line_number": 28, "usage_type": "name"}, {"api_name": "shapely.geometry.Point", "line_number": 28, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "11119184826", "text": "import requests as rq\nimport pprint as pp\n\n# Получаем токены из файликов\nwith open('vk_token.txt', 'r') as vk_token_file:\n vk_token = vk_token_file.read().strip()\nwith open('ya_token.txt', 'r') as ya_token_file:\n ya_token = ya_token_file.read().strip()\n\n# Разобраться с токеном ВК, он, похоже, не нужен\n\n\n# 
Function for downloading from VK\nclass VkPhotosGet: \n # The user has to enter a Yandex token and a VK user id\n # ya_token = input('Укажите токен с полигона Я.Диска: ')\n url = 'https://api.vk.com/method/'\n def __init__(self, vk_token):\n self.params = {\n 'access_token': vk_token,\n 'v':'5.131'\n }\n \n def get_url(self, count, owner_id=None):\n vk_photo_types = {\n 'w': 10, 'z': 9, 'y': 8, 'r': 7, 'q': 6, 'p': 5, 'o': 4, 'x':3, 'm': 2, 's': 1} \n # ava_df = pd.DataFrame()\n download_url = self.url + 'photos.get'\n download_params = {\n 'owner_id': owner_id, # if not set explicitly, VK infers it from the token owner\n 'album_id': 'profile', # service album, avatars only\n 'count': count,\n 'extended': 1 # the response then includes extra fields, notably likes \n }\n print(count)\n req = rq.get(download_url, params={**self.params, **download_params}).json()\n for_ya_list = []\n \n # Loop to pick the largest photo size\n for item in req['response']['items']:\n max_photo_size = max(item['sizes'], key=lambda x: vk_photo_types[x['type']])\n for_ya_list.append({'url': max_photo_size['url'], \n 'type': max_photo_size['type'], \n 'likes': item['likes']['count'],\n 'id': item['id'],\n 'date': item['date']\n })\n return for_ya_list\n\n\n\n# get_ava = VkPhotosGet(vk_token)\n# pp.pprint(get_ava.get_url()) # Сёма - 34872912\n\n# print(ava_df)\n\n# f'{directory}/{i[\"likes\"]}_{datetime.now().strftime(\"%H-%M-%S\")}.jpg'\n# f'photo[\"likes\"][\"count\"]}_{photo[\"date\"]}.jpg'", "repo_name": "ArtyomSany4/VkBackUp", "sub_path": "backup_vk_api.py", "file_name": "backup_vk_api.py", "file_ext": "py", "file_size_in_byte": 2421, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "requests.get", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "16208888048", "text": "import json\nimport numpy as np\nfrom matplotlib.patches import Circle\nfrom scipy.spatial import cKDTree\nimport matplotlib.pyplot as plt\n\nfrom constants import REACH_ZONE_R, MAP_W, MAP_H, NODE_R, GOAL_R, \\\n AGENT_R, FOV_R\n\n\nclass Env():\n\n def __init__(self):\n self.map_id = None\n self.map = None\n self.kdtree = None\n self.tree_ids = None # Array of IDs matching kdtree.data\n self.node_lookup = {} # Node ID -> node obj (both nodes/goals)\n self.goal_ids = []\n self.origin_node_id = None\n\n def load_map(self, id=1):\n self.map_id = id\n with open('../maps/%d.json' % id, 'r') as f:\n self.map = json.load(f)\n points = []\n ids = []\n for n in self.map.get(\"nodes\"):\n point = [n.get('x'), n.get('y')]\n points.append(point)\n id = n.get('id')\n self.node_lookup[id] = n\n ids.append(id)\n if n.get('type') == 'goal':\n self.goal_ids.append(id)\n if point == [0, 0]:\n self.origin_node_id = id\n points = np.array(points)\n self.kdtree = cKDTree(points)\n self.tree_ids = np.array(ids)\n\n def goals(self):\n return [self.node(id) for id in self.goal_ids]\n\n def node(self, id):\n return self.node_lookup.get(id)\n\n def node_loc(self, id):\n n = self.node(id)\n if n:\n return np.array([n.get('x'), n.get('y')])\n\n def nearest_points(self, loc, r=FOV_R, return_sorted=False):\n idxs = self.kdtree.query_ball_point(loc, r, return_sorted=return_sorted)\n points = self.kdtree.data[idxs, :]\n return points, self.tree_ids[idxs]\n\n def nearest_node(self, loc):\n dist, idx = self.kdtree.query(loc, k=1)\n id = self.tree_ids[idx]\n return self.node(id), dist\n\n def nearest_goal(self, loc, r=3*FOV_R):\n points, ids = self.nearest_points(loc, r=r, return_sorted=True)\n for id in 
ids:\n if id in self.goal_ids:\n return self.node(id)\n\n def is_near_goal(self, loc):\n g = self.nearest_goal(loc, r=GOAL_R * 1.5)\n return g is not None\n\n def is_within_node(self, loc, lenience=1.0):\n \"\"\"\n Returns nodes (goal or node) if loc is within a tolerance of closest node\n \"\"\"\n n, dist = self.nearest_node(loc)\n is_goal = n.get('type') == 'goal'\n r = GOAL_R if is_goal else NODE_R\n if dist <= (1+lenience) * r:\n return n\n\n def observation(self, loc):\n \"\"\"\n Return nodes in observable foveal radius in relative coordinates.\n Sorting?\n \"\"\"\n points, ids = self.nearest_points(loc, r=FOV_R + NODE_R)\n return [self.node(id) for id in ids]\n\n def render_map(self, render_nodes=True, agent_loc=None, fovea_loc=None, path=None, ax=None):\n m = self.map\n if not ax:\n fig, ax = plt.subplots(dpi=144, figsize=(MAP_W/60, MAP_H/60))\n nodes = m.get(\"nodes\")\n for n in nodes:\n is_goal = n.get('type') == 'goal'\n if render_nodes or is_goal:\n x, y = n.get('x'), n.get('y')\n s = GOAL_R if is_goal else NODE_R\n ax.add_patch(Circle((x, y), s,\n fill=False, lw=1, alpha=0.5,\n edgecolor='green' if is_goal else 'black'))\n if fovea_loc is not None:\n ax.add_patch(Circle(fovea_loc, FOV_R, fill=False, edgecolor='yellow'))\n if agent_loc is not None:\n ax.add_patch(Circle(agent_loc, AGENT_R, fill=True, facecolor='blue', alpha=0.5))\n reach_zone = Circle(agent_loc, REACH_ZONE_R, fill=False, color='black', alpha=0.2)\n ax.add_patch(reach_zone)\n if path:\n X, Y = [], []\n for loc in path:\n X.append(loc[0])\n Y.append(loc[1])\n ax.plot(X, Y, lw=2, color='black', dashes=[2, 2])\n ax.set_xlim((-MAP_W/2, MAP_W/2))\n ax.set_ylim((-MAP_H/2, MAP_H/2))\n return ax\n\n", "repo_name": "onejgordon/active-dynamical-prospection", "sub_path": "model/environment.py", "file_name": "environment.py", "file_ext": "py", "file_size_in_byte": 4165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.spatial.cKDTree", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "constants.FOV_R", "line_number": 53, "usage_type": "name"}, {"api_name": "constants.FOV_R", "line_number": 63, "usage_type": "name"}, {"api_name": "constants.GOAL_R", "line_number": 70, "usage_type": "name"}, {"api_name": "constants.GOAL_R", "line_number": 79, "usage_type": "name"}, {"api_name": "constants.NODE_R", "line_number": 79, "usage_type": "name"}, {"api_name": "constants.FOV_R", "line_number": 88, "usage_type": "name"}, {"api_name": "constants.NODE_R", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "constants.MAP_W", "line_number": 94, "usage_type": "name"}, {"api_name": "constants.MAP_H", "line_number": 94, "usage_type": "name"}, {"api_name": "constants.GOAL_R", "line_number": 100, "usage_type": "name"}, {"api_name": "constants.NODE_R", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.patches.Circle", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 105, "usage_type": "call"}, {"api_name": "constants.FOV_R", "line_number": 105, 
"usage_type": "argument"}, {"api_name": "matplotlib.patches.Circle", "line_number": 107, "usage_type": "call"}, {"api_name": "constants.AGENT_R", "line_number": 107, "usage_type": "argument"}, {"api_name": "matplotlib.patches.Circle", "line_number": 108, "usage_type": "call"}, {"api_name": "constants.REACH_ZONE_R", "line_number": 108, "usage_type": "argument"}, {"api_name": "constants.MAP_W", "line_number": 116, "usage_type": "name"}, {"api_name": "constants.MAP_H", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "33899608536", "text": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef draw_graph(x_M, y_M, x_B, y_B, labelX, labelY):\n plt.figure(figsize=(7, 8))\n plt.title(\" Распределение видов \\n по двум признакам \")\n plt.scatter(x_M, y_M, marker='^', label=\"M\")\n plt.scatter(x_B, y_B, marker='o', label=\"B\")\n plt.xlabel(labelX)\n plt.ylabel(labelY)\n plt.legend()\n plt.grid()\n\n\ndata_frame = pd.get_dummies(pd.read_csv(\"breast_cancer.csv\"), columns=[\"diagnosis\"])\ntags_knn = data_frame.get([\"diagnosis_B\", \"diagnosis_M\"]).to_numpy()\ntags_lr = data_frame.get(\"diagnosis_B\").to_numpy()\n\n# Построение графиков по двум признакам\n# data_M = data_frame[data_frame.diagnosis_M == 1]\n# data_B = data_frame[data_frame.diagnosis_B == 1]\n# field1 = \"texture_mean\"\n# field2 = \"area_mean\"\n#\n# draw_graph(data_M.get([field1]), data_M.get([field2]),\n# data_B.get([field1]), data_B.get([field2]),\n# field1, field2)\n\n# Неплохое сочетание признаков\n# field1 = \"radius_worst\"\n# field2 = \"texture_worst\"\n# draw_graph(data_M.get([field1]), data_M.get([field2]),\n# data_B.get([field1]), data_B.get([field2]),\n# field1, field2)\n# plt.show()\n\ndata_frame.drop([\"Unnamed: 32\", \"diagnosis_B\", \"diagnosis_M\", \"id\"], axis=1, inplace=True)\n\n\ndef knNeighbors(data_set, answers, isScaler):\n number_of_neighbors = list()\n predict_test_set = list()\n predict_train_set = list()\n average_value = list()\n\n if isScaler:\n scaler = StandardScaler()\n data = scaler.fit_transform(data_set)\n else:\n data = data_set.to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(data, answers, test_size=0.3, random_state=0)\n kf = KFold(n_splits=5, shuffle=True)\n\n for i in range(1, 50):\n knn = KNeighborsClassifier(i)\n # for train_index, test_index in kf.split(data):\n # knn.fit(data[train_index], answers[train_index])\n knn.fit(x_train, y_train)\n number_of_neighbors.append(i)\n predict_test_set.append(accuracy_score(y_test, knn.predict(x_test)))\n predict_train_set.append(accuracy_score(y_train, knn.predict(x_train)))\n array = cross_val_score(knn, x_test, y_test, cv=kf, scoring='accuracy')\n average = sum(array) / len(array)\n average_value.append(average)\n print(f\"Оптимальное значение количества соседей = {predict_test_set.index(max(predict_test_set)) + 1}\")\n print(f\"Значение точности классификации при этом {max(predict_test_set)}\")\n\n plt.subplot(2, 1, 1)\n plt.plot(number_of_neighbors, predict_test_set, label=\"Тестовые данные\")\n plt.plot(number_of_neighbors, predict_train_set, label=\"Тренировочные данные\")\n plt.title(\"Зависимость точности от количества соседей\")\n 
plt.ylabel(\"p - точность\")\n plt.legend()\n plt.grid()\n plt.subplot(2, 1, 2)\n plt.plot(number_of_neighbors, average_value)\n plt.grid()\n plt.ylabel(\"average - среднее значение\")\n plt.xlabel(\"n - количество соседей\")\n plt.show()\n\n\ndef logisticRegression(data_set, answers, isScaler):\n list_of_c = list()\n result_test = list()\n result_train = list()\n average_value = list()\n\n if isScaler:\n scaler = StandardScaler()\n data = scaler.fit_transform(data_set)\n else:\n data = data_set.to_numpy()\n x_train, x_test, y_train, y_test = train_test_split(data, answers, test_size=0.3, random_state=0)\n\n kf = KFold(n_splits=5, shuffle=True)\n\n for i in np.arange(0.01, 1, 0.01):\n lr = LogisticRegression(C=i)\n # for train_index, test_index in kf.split(data):\n # lr.fit(data[train_index], answers[train_index])\n lr.fit(x_train, y_train)\n list_of_c.append(i)\n result_test.append(accuracy_score(y_test, lr.predict(x_test)))\n result_train.append(accuracy_score(y_train, lr.predict(x_train)))\n array = cross_val_score(lr, x_test, y_test, cv=kf, scoring='accuracy')\n average = sum(array) / len(array)\n average_value.append(average)\n print(f\"Оптимальное значение параметра С = {list_of_c[result_test.index(max(result_test))]:1.2f}\")\n print(f\"Значение точности классификации при этом {max(result_test)}:1.2f\")\n plt.subplot(2, 1, 1)\n plt.plot(list_of_c, result_test, label=\"Тестовая выборка\")\n plt.plot(list_of_c, result_train, label=\"Тренировочная выборка\")\n plt.title(\"Зависимость вероятности от параметра C\")\n plt.ylabel(\"p - значение вероятности \")\n plt.legend()\n plt.grid()\n plt.subplot(2, 1, 2)\n plt.plot(list_of_c, average_value)\n plt.ylabel(\"average - среднее значение \")\n plt.xlabel(\"С - параметр регуляризации \")\n plt.grid()\n plt.show()\n\n\nknNeighbors(data_frame, tags_knn, False)\nlogisticRegression(data_frame, tags_lr, False)\nknNeighbors(data_frame, tags_knn, True)\nlogisticRegression(data_frame, tags_lr, True)\n", "repo_name": "kondraschovAV1999/machinlearning", "sub_path": "LR1/LR1.py", "file_name": "LR1.py", "file_ext": "py", "file_size_in_byte": 5629, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "pandas.get_dummies", 
"line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 115, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 116, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "21643756034", "text": "from bisect import insort\nfrom collections import defaultdict\n\n\nclass Solution:\n def findOrder(self, numCourses, prereq):\n courses = defaultdict(set)\n {insort(courses[c], p) for c, p in prereq}\n return sorted(range(numCourses), key=lambda x: len(courses[x]))\n\n\nif __name__ == '__main__':\n Solution().findOrder(1, [])\n", "repo_name": "Sma-Das/Leetcode", "sub_path": "2-Medium/findOrder.py", "file_name": "findOrder.py", "file_ext": "py", "file_size_in_byte": 344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "bisect.insort", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "8608858000", "text": "# from flask import request\nfrom lidarts import socketio, db\nfrom flask import current_app\nfrom flask_login import current_user\nfrom flask_socketio import disconnect, emit, join_room, ConnectionRefusedError\nfrom lidarts.socket.utils import send_notification\nfrom lidarts.models import Notification, SocketConnections, UserSettings\nfrom datetime import datetime\n\n\n@socketio.on('connect', namespace='/base')\ndef connect_client():\n if not current_user.is_authenticated:\n return\n\n # current_user.ping()\n current_app.redis.sadd('last_seen_bulk_user_ids', current_user.id)\n\n join_room(current_user.username)\n notifications = 
Notification.query.filter_by(user=current_user.id).all()\n for notification in notifications:\n send_notification(current_user.username, notification.message, notification.author, notification.type, silent=True)\n\n emit('status_reply', {'status': current_user.status})\n\n\n@socketio.on('init', namespace='/base')\ndef init(message):\n user_id = message['user_id']\n settings = (\n UserSettings.query\n .filter_by(user=user_id).first()\n )\n if not settings:\n settings = UserSettings(user=user_id)\n db.session.add(settings)\n db.session.commit()\n notification_sound = settings.notification_sound\n emit(\n 'settings',\n {'notification_sound': notification_sound, }\n )\n\n\n@socketio.on('user_heartbeat', namespace='/base')\ndef heartbeat(message):\n user_id = message['user_id']\n # current_user.ping()\n current_app.redis.sadd('last_seen_bulk_user_ids', user_id)\n\n\n@socketio.on('disconnect', namespace='/base')\ndef disconnect_client():\n if not current_user.is_authenticated:\n return\n #current_user.last_seen = datetime.utcnow()\n #current_user.is_online = False\n #db.session.commit()\n current_app.redis.sadd('last_seen_bulk_user_ids', current_user.id)\n", "repo_name": "mischkadb/lidarts", "sub_path": "lidarts/socket/base_handler.py", "file_name": "base_handler.py", "file_ext": "py", "file_size_in_byte": 1883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 34, "dataset": "github-code", "pt": "94", "api": [{"api_name": "flask_login.current_user.is_authenticated", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.current_app.redis.sadd", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.current_app.redis", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 17, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 17, "usage_type": "name"}, {"api_name": "flask_socketio.join_room", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_login.current_user.username", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 19, "usage_type": "name"}, {"api_name": "lidarts.models.Notification.query.filter_by", "line_number": 20, "usage_type": "call"}, {"api_name": "lidarts.models.Notification.query", "line_number": 20, "usage_type": "attribute"}, {"api_name": "lidarts.models.Notification", "line_number": 20, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 20, "usage_type": "name"}, {"api_name": "lidarts.socket.utils.send_notification", "line_number": 22, "usage_type": "call"}, {"api_name": "flask_login.current_user.username", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 22, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_login.current_user.status", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 24, "usage_type": "name"}, {"api_name": "lidarts.socketio.on", "line_number": 11, "usage_type": "call"}, {"api_name": "lidarts.socketio", "line_number": 11, "usage_type": "name"}, {"api_name": "lidarts.models.UserSettings.query.filter_by", "line_number": 31, 
"usage_type": "call"}, {"api_name": "lidarts.models.UserSettings.query", "line_number": 31, "usage_type": "attribute"}, {"api_name": "lidarts.models.UserSettings", "line_number": 31, "usage_type": "name"}, {"api_name": "lidarts.models.UserSettings", "line_number": 35, "usage_type": "call"}, {"api_name": "lidarts.db.session.add", "line_number": 36, "usage_type": "call"}, {"api_name": "lidarts.db.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "lidarts.db", "line_number": 36, "usage_type": "name"}, {"api_name": "lidarts.db.session.commit", "line_number": 37, "usage_type": "call"}, {"api_name": "lidarts.db.session", "line_number": 37, "usage_type": "attribute"}, {"api_name": "lidarts.db", "line_number": 37, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 39, "usage_type": "call"}, {"api_name": "lidarts.socketio.on", "line_number": 27, "usage_type": "call"}, {"api_name": "lidarts.socketio", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.current_app.redis.sadd", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.current_app.redis", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 49, "usage_type": "name"}, {"api_name": "lidarts.socketio.on", "line_number": 45, "usage_type": "call"}, {"api_name": "lidarts.socketio", "line_number": 45, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.current_app.redis.sadd", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.current_app.redis", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 59, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 59, "usage_type": "name"}, {"api_name": "lidarts.socketio.on", "line_number": 52, "usage_type": "call"}, {"api_name": "lidarts.socketio", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "40637184159", "text": "from collections import defaultdict\n\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\nfrom textstat.textstat_ext import textstat\nfrom utils.html_utils import get_node_text, process_html\nfrom utils.word_utils import is_stop_word\n\n\nclass FeaturesCalculator:\n HTML_FEATURES_COUNT = 3\n TEXT_FEATURES_COUNT = 16\n OTHER_FEATURES_COUNT = 2\n QA_OVERLAP_FEATURES_COUNT = 4\n LINGUISTIC_FEATURES_COUNT = HTML_FEATURES_COUNT + TEXT_FEATURES_COUNT\n FEATURES_COUNT = LINGUISTIC_FEATURES_COUNT + OTHER_FEATURES_COUNT\n\n def __init__(self):\n self.calculated_features = {}\n self.word_count = defaultdict(int)\n self.documents = 0\n self.maximum_features = np.zeros(FeaturesCalculator.FEATURES_COUNT)\n self.answer_features_for_question = {}\n\n def handle_answer_question(self, answer_id, question_id, answer_html_text, age, score):\n answer_words = set(textstat.lexicon(process_html(answer_html_text)))\n for word in answer_words:\n self.word_count[word] += 1\n self.documents += 1\n features = self._calculate_features(answer_id, answer_html_text, age, score)\n self.maximum_features = np.maximum(self.maximum_features, features)\n if question_id not in self.answer_features_for_question:\n self.answer_features_for_question[question_id] = [[] for _ in range(len(features))]\n for i in range(len(features)):\n 
self.answer_features_for_question[question_id][i].append(features[i])\n\n def process_answers(self):\n for question_id in self.answer_features_for_question:\n for i in range(FeaturesCalculator.FEATURES_COUNT):\n self.answer_features_for_question[question_id][i].sort()\n self.answer_features_for_question[question_id][-2].reverse()\n\n def get_features(self, answer_id, question_id, answer_html_text, question_html_text):\n features = self.calculated_features[answer_id]\n answers = self.answer_features_for_question[question_id]\n normalized_features = np.divide(features, self.maximum_features)\n normalized_by_question_features = []\n features_relative_positions = []\n for i in range(FeaturesCalculator.FEATURES_COUNT):\n maximum_feature_value = max(answers[i]) if max(answers[i]) > 0 else 1\n normalized_by_question_features.append(features[i] / maximum_feature_value)\n features_relative_positions.append(answers[i].index(features[i]) / len(answers[i]))\n overlap_features = self._calculate_overlap_features(process_html(answer_html_text),\n process_html(question_html_text))\n return features \\\n + normalized_features.tolist() \\\n + normalized_by_question_features \\\n + features_relative_positions \\\n + overlap_features\n\n def _calculate_features(self, id, html_text, age, score):\n if id not in self.calculated_features:\n soup = BeautifulSoup(html_text, 'html.parser')\n text = ' '.join(list(filter(None, [get_node_text(node).strip() for node in soup.childGenerator()])))\n self.calculated_features[id] = FeaturesCalculator.get_html_features(\n soup) + FeaturesCalculator.get_text_features(text) + [age, score]\n return self.calculated_features[id]\n\n @staticmethod\n def get_html_features(soup):\n return [FeaturesCalculator.get_tag_count(soup, 'a'),\n FeaturesCalculator.get_tag_count(soup, 'code'),\n FeaturesCalculator.get_tag_count(soup, 'p')]\n\n @staticmethod\n def get_tag_count(soup, tag):\n return len(soup.find_all(tag))\n\n @staticmethod\n def get_text_features(text):\n if textstat.lexicon_count(text) == 0:\n return [0 for _ in range(FeaturesCalculator.TEXT_FEATURES_COUNT)]\n length = len(text)\n return [\n textstat.uppercase_letter_count(text) / length,\n textstat.lowercase_letter_count(text) / length,\n textstat.space_count(text) / length,\n length,\n textstat.longest_sentence_char_count(text),\n textstat.longest_sentence_lexicon_count(text),\n textstat.lexicon_count(text) / textstat.sentence_count(text),\n textstat.letter_count(text) / textstat.lexicon_count(text),\n textstat.sentence_count(text),\n textstat.automated_readability_index(text),\n textstat.flesch_reading_ease(text),\n textstat.smog_index(text),\n textstat.flesch_kincaid_grade(text),\n textstat.coleman_liau_index(text),\n textstat.gunning_fog(text),\n textstat.lix(text)\n ]\n\n def _calculate_overlap_features(self, answer_text, question_text):\n answer_words = set(textstat.lexicon(answer_text))\n answer_filtered_words = set(filter(lambda word: not is_stop_word(word), answer_words))\n question_words = set(textstat.lexicon(question_text))\n question_filtered_words = set(filter(lambda word: not is_stop_word(word), question_words))\n features = []\n for a_words, q_words in [(answer_words, question_words), (answer_filtered_words, question_filtered_words)]:\n union = a_words.union(q_words)\n intersection = a_words.intersection(q_words)\n features.append(len(intersection) / len(union))\n intersection_idf = 0\n union_idf = 0\n for word in intersection:\n intersection_idf += np.log(self.documents / (self.word_count[word] + 1))\n for 
word in union:\n union_idf += np.log(self.documents / (self.word_count[word] + 1))\n features.append(intersection_idf / union_idf)\n return features", "repo_name": "Nikitosh/StackOverflow-Correct-Answer-Detection", "sub_path": "utils/features_calculator.py", "file_name": "features_calculator.py", "file_ext": "py", "file_size_in_byte": 5797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "collections.defaultdict", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat.lexicon", "line_number": 27, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 27, "usage_type": "name"}, {"api_name": "utils.html_utils.process_html", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.html_utils.process_html", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.html_utils.process_html", "line_number": 55, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.html_utils.get_node_text", "line_number": 65, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat.lexicon_count", "line_number": 82, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 82, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.uppercase_letter_count", "line_number": 86, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 86, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.lowercase_letter_count", "line_number": 87, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 87, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.space_count", "line_number": 88, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 88, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.longest_sentence_char_count", "line_number": 90, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 90, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.longest_sentence_lexicon_count", "line_number": 91, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 91, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.lexicon_count", "line_number": 92, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 92, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.sentence_count", "line_number": 92, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat.letter_count", "line_number": 93, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 93, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.lexicon_count", "line_number": 93, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat.sentence_count", "line_number": 94, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 94, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.automated_readability_index", "line_number": 95, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 95, "usage_type": 
"name"}, {"api_name": "textstat.textstat_ext.textstat.flesch_reading_ease", "line_number": 96, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 96, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.smog_index", "line_number": 97, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 97, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.flesch_kincaid_grade", "line_number": 98, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 98, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.coleman_liau_index", "line_number": 99, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 99, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.gunning_fog", "line_number": 100, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 100, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.lix", "line_number": 101, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 101, "usage_type": "name"}, {"api_name": "textstat.textstat_ext.textstat.lexicon", "line_number": 105, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 105, "usage_type": "name"}, {"api_name": "utils.word_utils.is_stop_word", "line_number": 106, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat.lexicon", "line_number": 107, "usage_type": "call"}, {"api_name": "textstat.textstat_ext.textstat", "line_number": 107, "usage_type": "name"}, {"api_name": "utils.word_utils.is_stop_word", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "12999946617", "text": "from re_parse import *\n\nimport itertools\n\ndigit = r'[0-9]'\nnum = digit + '+'\ndigits = lambda n='+': digit + re_count(n)\n\ntime = lambda postfix='': (g('hour' + postfix, digits((1,2))) + ':' + g('minute' + postfix, digits(2)))\ntime_range = (time() + o('-' + time('_range')))\n#dayname = or_('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')\n#DAYNAME can contain any non whitespace-character besides +, -, ], >, a digit or \\n.\ndayname = n_(nin_(r'-+\\]>\\s\\d' + '\\n'))\nduration_chars = r'ymwdh'\n\n\n\nn = lambda *args: '_'.join(filter(bool, args))\n\nmodifier_duration = lambda name: (\n g(n(name, 'num'), num)\n + g(n(name, 'timespan'), in_(duration_chars))\n)\n\ndef modifier_duration_range(name):\n rname = n(name, 'range')\n return (\n modifier_duration(name) + o('/' + g(rname, modifier_duration(rname)))\n )\n\ndate_modifier = lambda typ, name='': (\n g(n(name, 'type'), typ)\n + modifier_duration(name)\n)\n\ndate_modifier_range = lambda typ, name='': (\n g(n(name, 'type'), typ)\n + modifier_duration_range(name)\n)\n\nrepeater_chars = [esc_(s) for s in ['.+', '++', '+']]\ndef repeater(postfix):\n pf = 'repeater' + postfix\n return g(pf, date_modifier_range(or_(*repeater_chars), pf))\n\ndate_shift = lambda postfix: g('shift' + postfix, date_modifier(or_('--', '-'), 'shift' + postfix))\n\ndate = (g('year', digits(4)) + '-' + g('month', digits(2)) + '-' + g('day', digits(2)))\n\nactive = '<', '>'\ninactive = r'\\[', r'\\]'\n\ndef date_paren(s, n=''):\n name = '_active%s' % n\n return or_(g(name, r'<'), r'\\[') + s + ref_yes_no_(name, '>', r'\\]')\n\n\ndef date_stamp(time, extra=(repeater,), postfix=''):\n _g = lambda n, s: g(n 
+ postfix, s)\n    #lax: dayname not necessarily optional\n    d = _g('date', date) + o(ws + _g('dayname', dayname)) + o(ws + _g('time', time))\n    if extra:\n        #extra = [o(ws + _g('extra%s' % i, e)) for i, e in enumerate(extra)]\n        extras = itertools.permutations(extra)\n\n        extras = [[o(ws + e(postfix + str(i))) for e in extra] for i, extra in enumerate(extras)]\n        extras = [''.join(extra) for extra in extras]\n        extra = or_(*extras)\n        d += extra\n    return d\n\nfull_date_stamp = date_paren(date_stamp(time_range, (repeater, date_shift)))\n\ndef date_range(extra=None):\n    p = date_paren\n    return p(date_stamp(time_range, extra, '_0'), 0) + o('--' + g('daterange', p(date_stamp(time(), extra, '_1'), 1)))\n\n\nfull_date_range = date_range((repeater, date_shift))\n\nsimple_date = n_(or_(r'[-+.:/ \\d' + duration_chars + ']+', dayname))\nsimple_date_range = g('date', date_paren(simple_date, 0)) + o('--' + g('daterange', date_paren(simple_date, 1)))\n\n\nnamed_date = lambda name, extra=None: (name + ':' + ws + date_range(extra))\n\n#todo closed still supports time range etc\nclosed = named_date('CLOSED', (repeater, date_shift, ))\nscheduled = named_date('SCHEDULED', (repeater, date_shift,))\ndeadlined = named_date('DEADLINE', (repeater, date_shift,))\n\ndef named_dates():\n    withws = lambda a, b, c: a + ows + b + ows + c\n    return ows + or_(\n        withws(closed, scheduled, o(deadlined))\n        , withws(closed, o(deadlined), o(scheduled))\n        , withws(scheduled, deadlined, o(closed))\n        , withws(scheduled, o(closed), o(deadlined))\n        , withws(deadlined, closed, o(scheduled))\n        , withws(deadlined, o(scheduled), o(closed))\n    ) + ows\n\n\n", "repo_name": "hrehfeld/python-org-mode", "sub_path": "org/date_re.py", "file_name": "date_re.py", "file_ext": "py", "file_size_in_byte": 3341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "itertools.permutations", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "31118880171", "text": "import os\nimport argparse\n\nfrom preprocess.data import DataLoader, make_lookup_table, load_file\n\nfrom models.sentencepiece import Graph\nfrom models.embedding import W2vGraph\nfrom models.gb_cnn import ConvolutionClassifier\n\nfrom utils.chker import file_exist\nfrom keras.callbacks import ReduceLROnPlateau\n\n\ndef hyper_parameter():\n    args = 
argparse.ArgumentParser(description='text classification or regression model training hyper-parameter')\n    # ------\n    # Text data parameter\n    args.add_argument('-i', '--input_text', default='data/ratings.txt', type=str, help='text location to make text only file')\n    args.add_argument('-o', '--output_location', default='data/text_only.txt', type=str, help=\"text only file location to save\")\n    args.add_argument('-max_len', '--max_len', default=30, type=int, help=\"Parameter for training neural network to fix input sentence len\")\n    # ------\n    # Sentencepiece model parameter for train and save\n    args.add_argument('-sp_text_path', default='data/text_only.txt', type=str, help=\"input text location\")\n    args.add_argument('-sp_model_path', default='pre_train/spm_model', type=str, help=\"trained model save location\")\n    args.add_argument('-sp_type', default='bpe', type=str, help=\" model type. Choose from bpe (default), unigram, char, or word. The input sentence must be pretokenized when using word type.\")\n    args.add_argument('-sp_vocab_size', default=37000, type=int, help=\"vocabulary size\")\n    args.add_argument('-sp_coverage', default=0.9995, type=float, help=\"amount of characters covered by the model\")\n    args.add_argument('-sp_train', default=False, type=bool, help=\"use sentencepiece\")\n\n    # ------\n    # Word2vec model parameter for train and save\n    args.add_argument('-w2v_model_path', default='pre_train/word2vec.model', type=str, help=\"trained model save location\")\n    args.add_argument('-w2v_text_path', default='data/text_only.txt', type=str, help=\"location of text to train word2vec\")\n    args.add_argument('-w2v_embed_size', default=300, type=int, help=\"word embedding size\")\n    args.add_argument('-w2v_type', default=1, type=int, help=\"0 for CBOW, 1 for skip-gram\")\n    args.add_argument('-w2v_loss', default=1, type=int, help=\"0 for negative sampling, 1 for hierarchical softmax\")\n    args.add_argument('-w2v_min_count', default=3, type=int, help=\"minimum count of words to train\")\n    args.add_argument('-w2v_window_size', default=2, type=int, help=\"window size of training model\")\n    args.add_argument('-w2v_train', default=False, type=bool, help=\"use word2vec\")\n\n    # ------\n    # Neural Net parameter for train and save\n    args.add_argument('-nn_model_path', default='weights/', type=str, help='location of trained model to save')\n    args.add_argument('-nn_model', default='cnn', type=str, help='parameter to choose which model to train. 
list of model = [lstm, cnn]')\n    args.add_argument('-nn_test_set_path', default='data/test_co_classification.txt')\n    args.add_argument('-nn_train_set_path', default='data/train_co_classification.txt')\n    args.add_argument('-nn_keep_prob', default=0.25, type=float)\n    args.add_argument('-nn_filters', default=32, type=int)\n    args.add_argument('-nn_train_type', default=\"w2v\", type=str)\n    args.add_argument('-nn_batch_size', default=128, type=int)\n    args.add_argument('-nn_epoch_size', default=5, type=int)\n\n    return args.parse_args()\n\n\ndef sp_worker(args):\n    \"\"\" sentencepiece_worker is to train and save model\n    \"\"\"\n    # ------\n    # Data Loader to prepare training and saving 'sentence_piece' model\n    DataLoader(data_load_path=args.input_text, data_save_path=args.output_location)\n\n    # ------\n    # Build model and training\n    sp_model = Graph(\n        text_path=args.sp_text_path,\n        model_path=args.sp_model_path,\n        type=args.sp_type,\n        vocab_size=args.sp_vocab_size,\n        coverage=args.sp_coverage\n    )\n    sp_model.op_train()\n    file_exist(args.sp_model_path + \".model\")\n    file_exist(args.sp_text_path)\n\n\ndef w2v_worker(args, text_list):\n    \"\"\" word2vec worker is to train and save model\n    \"\"\"\n    # ------\n    # Make indexed text\n    padded_text = make_lookup_table(\n        whole_text=text_list,\n        text_list=text_list,\n        vocab_size=args.sp_vocab_size,\n        spm_model_path=args.sp_model_path,\n        max_len=args.max_len\n    )\n\n    # ------\n    # Train word2vec model and save\n    w2v_model = W2vGraph(\n        model_path=args.w2v_model_path,\n        text_list=padded_text,\n        embedding_size=args.w2v_embed_size,\n        type=args.w2v_type,\n        loss=args.w2v_loss,\n        min_count=args.w2v_min_count,\n        window_size=args.w2v_window_size\n    )\n    w2v_model.op_train()\n\n\ndef nn_train_worker(args):\n    \"\"\" nn_train_worker is to train Neural Network and save model\n    \"\"\"\n    # ------\n    # Prepare train & test data\n    whole_text, _ = load_file(data_load_path=args.input_text)\n    train_text, train_label = load_file(data_load_path=args.nn_train_set_path)\n    test_text, test_label = load_file(data_load_path=args.nn_test_set_path)\n    # ------\n    # stage.1: train dataset str -> idx\n    train_text = make_lookup_table(\n        whole_text=whole_text,\n        text_list=train_text,\n        vocab_size=args.sp_vocab_size,\n        spm_model_path=args.sp_model_path,\n        max_len=args.max_len\n    )\n    # ------\n    # stage.2: test dataset str -> idx\n    test_text = make_lookup_table(\n        whole_text=whole_text,\n        text_list=test_text,\n        vocab_size=args.sp_vocab_size,\n        spm_model_path=args.sp_model_path,\n        max_len=args.max_len\n    )\n    # ------\n    # initialization gb convolution neural network\n    nn_model = ConvolutionClassifier(\n        classes=2,\n        vocab_size=args.sp_vocab_size,\n        embed_size=args.w2v_embed_size,\n        max_len=args.max_len,\n        keep_prob=args.nn_keep_prob,\n        filters=args.nn_filters,\n        w2v_model_path=args.w2v_model_path,\n        train_type=args.nn_train_type\n    )\n\n    nn_model_train = nn_model.build_graph()\n    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=1, min_lr=1e-5)\n    nn_model_train.fit(train_text, train_label,\n                       batch_size=args.nn_batch_size,\n                       epochs=args.nn_epoch_size,\n                       verbose=1,\n                       validation_data=[test_text, test_label],\n                       callbacks=[reduce_lr])\n\n    nn_model_train.save(args.nn_model_path + args.nn_model + '.h5')\n\n\ndef main():\n    # ------\n    # training mode require\n    os.environ['CUDA_VISIBLE_DEVICES'] = str(1)\n    # Get hyper parameters for other operations\n    args = hyper_parameter()\n    print(\"[@] Hyper parameter loaded: {}\".format(args))\n\n    # ------\n    # Prepare dataset\n    whole_text, _ = 
load_file(data_load_path=args.input_text)\n\n # ------\n # Train and save 'sentence_piece' model\n if args.sp_train:\n sp_worker(args)\n # print(\"[@] Sentencepiece csv file path {}, model file path {}\")\n\n # ------\n # Training Word2vec model and save // prepare lookup_table\n if args.w2v_train:\n w2v_worker(args, whole_text)\n # print(\"[@] Word2vec model saved\")\n\n # ------\n # Network train\n nn_train_worker(args)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "helakim/goblin-ai", "sub_path": "keras_version/text_classification/model_train.py", "file_name": "model_train.py", "file_ext": "py", "file_size_in_byte": 7270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "94", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "preprocess.data.DataLoader", "line_number": 61, "usage_type": "call"}, {"api_name": "models.sentencepiece.Graph", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.chker.file_exist", "line_number": 73, "usage_type": "call"}, {"api_name": "utils.chker.file_exist", "line_number": 74, "usage_type": "call"}, {"api_name": "preprocess.data.make_lookup_table", "line_number": 82, "usage_type": "call"}, {"api_name": "models.embedding.W2vGraph", "line_number": 92, "usage_type": "call"}, {"api_name": "preprocess.data.load_file", "line_number": 109, "usage_type": "call"}, {"api_name": "preprocess.data.load_file", "line_number": 110, "usage_type": "call"}, {"api_name": "preprocess.data.load_file", "line_number": 111, "usage_type": "call"}, {"api_name": "preprocess.data.make_lookup_table", "line_number": 114, "usage_type": "call"}, {"api_name": "preprocess.data.make_lookup_table", "line_number": 123, "usage_type": "call"}, {"api_name": "models.gb_cnn.ConvolutionClassifier", "line_number": 132, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 144, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 158, "usage_type": "attribute"}, {"api_name": "preprocess.data.load_file", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "4940755166", "text": "import os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\nimport argparse\nimport logging\n\nimport random\nimport numpy as np\n\nimport tensorflow as tf\n\nimport train\nimport infer\nfrom utils import evaluation_utils\nfrom utils import vocab_utils\nfrom utils import misc_utils as utils\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\") \n\n # network\n parser.add_argument(\"--embed_size\", type=int, default=1024, help=\"Embedding size.\")\n parser.add_argument(\"--hidden_size\", type=int, default=1024, help=\"Hidden state size.\")\n parser.add_argument(\"--num_layers\", type=int, default=4,\n help=\"Network depth.\") \n parser.add_argument(\"--attention\", type=str, default=\"normed_bahdanau\", help=\"\"\"\\\n luong | scaled_luong | bahdanau | normed_bahdanau\"\"\")\n parser.add_argument(\n \"--output_attention\", type=\"bool\", nargs=\"?\", const=True,\n default=True,\n help=\"\"\"\\\n Only used in standard attention_architecture. 
Whether to use attention as\n      the cell output at each timestep.\n      .\\\n      \"\"\")\n    # optimizer\n    parser.add_argument(\"--optimizer\", type=str, default=\"sgd\", help=\"sgd | adam\")\n    parser.add_argument(\"--learning_rate\", type=float, default=1.0,\n                        help=\"Learning rate. Adam: 0.001 | 0.0001\")\n    parser.add_argument(\"--warmup_steps\", type=int, default=0,\n                        help=\"How many steps we inverse-decay learning.\")\n    parser.add_argument(\"--warmup_scheme\", type=str, default=\"t2t\", help=\"\"\"\\\n      How to warmup learning rates. Options include:\n        t2t: Tensor2Tensor's way, start with lr 100 times smaller, then\n             exponentiate until the specified lr.\\\n      \"\"\")\n    parser.add_argument(\n        \"--decay_scheme\", type=str, default=\"luong10\", help=\"\"\"\\\n      How we decay learning rate. Options include:\n        luong234: after 2/3 num train steps, we start halving the learning rate\n          for 4 times before finishing.\n        luong5: after 1/2 num train steps, we start halving the learning rate\n          for 5 times before finishing.\\\n        luong10: after 1/2 num train steps, we start halving the learning rate\n          for 10 times before finishing.\\\n      \"\"\")\n    # initializer\n    parser.add_argument(\"--init_op\", type=str, default=\"uniform\",\n                        help=\"uniform | glorot_normal | glorot_uniform\")\n    parser.add_argument(\"--init_weight\", type=float, default=0.1,\n                        help=(\"for uniform init_op, initialize weights \"\n                              \"between [-this, this].\")) \n    # data\n    parser.add_argument(\"--src\", type=str, default=\"zh\",\n                        help=\"Source suffix, e.g., zh.\")\n    parser.add_argument(\"--tgt\", type=str, default=\"en\",\n                        help=\"Target suffix, e.g., en.\")\n    parser.add_argument(\"--train_prefix\", type=str, default=\"corpus_small\",\n                        help=\"Train prefix, expect files with src/tgt suffixes.\")\n    parser.add_argument(\"--dev_prefix\", type=str, default=\"dev\",\n                        help=\"Dev prefix, expect files with src/tgt suffixes.\")\n    parser.add_argument(\"--test_prefix\", type=str, default=\"test\",\n                        help=\"Test prefix, expect files with src/tgt suffixes.\")\n    parser.add_argument(\"--out_dir\", type=str, default=\"model\",\n                        help=\"Store model files.\") \n    # vocab\n    parser.add_argument(\"--sos\", type=str, default=\"<s>\",\n                        help=\"Start-of-sentence symbol.\")\n    parser.add_argument(\"--eos\", type=str, default=\"</s>\",\n                        help=\"End-of-sentence symbol.\")\n    parser.add_argument(\"--share_vocab\", type=\"bool\", nargs=\"?\", const=True,\n                        default=False,\n                        help=\"\"\"\\\n      Whether to use the source vocab and embeddings for both source and\n      target.\\\n      \"\"\")\n    parser.add_argument(\"--vocab_prefix\", type=str, default=\"vocab_small\", help=\"\"\"\\\n      Vocab prefix, expect files with src/tgt suffixes. If None, extract from\n      train files.\\\n      \"\"\")\n    parser.add_argument(\"--embed_prefix\", type=str, default=None, help=\"\"\"\\\n      Pretrained embedding prefix, expect files with src/tgt suffixes.\n      The embedding files should be Glove formatted txt files.\\\n      \"\"\")\n    # sequence lengths\n    parser.add_argument(\"--src_max_len\", type=int, default=50,\n                        help=\"Max length of src sequences during training.\")\n    parser.add_argument(\"--tgt_max_len\", type=int, default=50,\n                        help=\"Max length of tgt sequences during training.\")\n    parser.add_argument(\"--src_max_len_infer\", type=int, default=None,\n                        help=\"Max length of src sequences during inference.\")\n    parser.add_argument(\"--tgt_max_len_infer\", type=int, default=None,\n                        help=\"\"\"\\\n      Max length of tgt sequences during inference. 
Also use to restrict the\n      maximum decoding length.\\\n      \"\"\") \n    # default settings work well (rarely need to change)\n    parser.add_argument(\"--unit_type\", type=str, default=\"lstm\",\n                        help=\"lstm | gru | layer_norm_lstm | nas\")\n    parser.add_argument(\"--forget_bias\", type=float, default=1.0,\n                        help=\"Forget bias for BasicLSTMCell.\")\n    parser.add_argument(\"--dropout\", type=float, default=0.2,\n                        help=\"Dropout rate (not keep_prob)\")\n    parser.add_argument(\"--max_gradient_norm\", type=float, default=5.0,\n                        help=\"Clip gradients to this norm.\")\n    parser.add_argument(\"--source_reverse\", type=\"bool\", nargs=\"?\", const=False,\n                        default=False, help=\"Reverse source sequence.\")\n    parser.add_argument(\"--batch_size\", type=int, default=64, help=\"Batch size.\") \n    parser.add_argument(\"--steps_per_stats\", type=int, default=100,\n                        help=(\"How many training steps to do per stats logging.\"\n                              \" Save checkpoint every 10x steps_per_stats\"))\n    parser.add_argument(\"--num_buckets\", type=int, default=5,\n                        help=\"Put data into similar-length buckets.\") \n    # misc\n    parser.add_argument(\"--epochs\", type=int, default=5,\n                        help=\"epoch number\")\n    parser.add_argument(\"--steps_per_external_eval\", type=int, default=None,\n                        help=\"\"\"\\\n      How many training steps to do per external evaluation. Automatically set\n      based on data if None.\\\n      \"\"\")\n    parser.add_argument(\"--random_seed\", type=int, default=23,\n                        help=\"Random seed (>0, set a specific seed).\")\n    parser.add_argument(\"--num_keep_ckpts\", type=int, default=5,\n                        help=\"Max number of checkpoints to keep.\")\n    parser.add_argument(\"--avg_ckpts\", type=\"bool\", nargs=\"?\",\n                        const=True, default=False, help=(\"\"\"\\\n      Average the last N checkpoints for external evaluation.\n      N can be controlled by setting --num_keep_ckpts.\\\n      \"\"\"))\n    # inference\n    parser.add_argument(\"--ckpt\", type=str, default=\"\",\n                        help=\"Checkpoint file to load a model for inference.\")\n    parser.add_argument(\"--inference_input_file\", type=str, default=None,\n                        help=\"Set to the text to decode.\")\n    parser.add_argument(\"--infer_batch_size\", type=int, default=64,\n                        help=\"Batch size for inference mode.\")\n    parser.add_argument(\"--inference_output_file\", type=str, default=None,\n                        help=\"Output file to store decoding results.\")\n    parser.add_argument(\"--inference_ref_file\", type=str, default=None,\n                        help=(\"\"\"\\\n      Reference file to compute evaluation scores (if provided).\\\n      \"\"\"))\n    parser.add_argument(\"--beam_width\", type=int, default=10,\n                        help=(\"\"\"\\\n      beam width when using beam search decoder. If 0 (default), use standard\n      decoder with greedy helper.\\\n      \"\"\"))\n    parser.add_argument(\"--length_penalty_weight\", type=float, default=1.0,\n                        help=\"Length penalty for beam search.\")\n    parser.add_argument(\"--sampling_temperature\", type=float,\n                        default=0.0,\n                        help=(\"\"\"\\\n      Softmax sampling temperature for inference decoding, 0.0 means greedy\n      decoding. This option is ignored when using beam search.\\\n      \"\"\"))\n    parser.add_argument(\"--num_translations_per_input\", type=int, default=1,\n                        help=(\"\"\"\\\n      Number of translations generated for each sentence. 
This is only used for\n inference.\\\n \"\"\"))\n\n return parser.parse_args()\n\ndef create_hparams(args):\n \"\"\"\n Create training hparams.\n \"\"\"\n\n return tf.contrib.training.HParams(\n # Data\n src=args.src,\n tgt=args.tgt,\n train_prefix=args.train_prefix,\n dev_prefix=args.dev_prefix,\n test_prefix=args.test_prefix,\n vocab_prefix=args.vocab_prefix,\n embed_prefix=args.embed_prefix,\n out_dir=args.out_dir,\n\n # Networks\n embed_size=args.embed_size,\n hidden_size=args.hidden_size,\n num_layers=args.num_layers,\n attention=args.attention,\n output_attention=args.output_attention,\n dropout=args.dropout,\n unit_type=args.unit_type,\n\n # Train\n optimizer=args.optimizer,\n batch_size=args.batch_size,\n init_op=args.init_op,\n init_weight=args.init_weight,\n max_gradient_norm=args.max_gradient_norm,\n learning_rate=args.learning_rate,\n warmup_steps = args.warmup_steps,\n warmup_scheme=args.warmup_scheme,\n decay_scheme=args.decay_scheme,\n\n # Data constraints\n num_buckets=args.num_buckets,\n src_max_len=args.src_max_len,\n tgt_max_len=args.tgt_max_len,\n source_reverse=args.source_reverse,\n\n # Inference\n ckpt=args.ckpt,\n src_max_len_infer=args.src_max_len_infer,\n tgt_max_len_infer=args.tgt_max_len_infer,\n infer_batch_size=args.infer_batch_size,\n beam_width=args.beam_width,\n length_penalty_weight=args.length_penalty_weight,\n sampling_temperature=args.sampling_temperature,\n num_translations_per_input=args.num_translations_per_input,\n inference_input_file=args.inference_input_file,\n inference_output_file=args.inference_output_file,\n inference_ref_file=args.inference_ref_file,\n\n # Vocab\n sos=args.sos if args.sos else vocab_utils.SOS,\n eos=args.eos if args.eos else vocab_utils.EOS,\n\n # Misc\n epochs=args.epochs,\n forget_bias=args.forget_bias,\n steps_per_stats=args.steps_per_stats,\n steps_per_external_eval=args.steps_per_external_eval,\n random_seed=args.random_seed,\n share_vocab=args.share_vocab,\n num_keep_ckpts=args.num_keep_ckpts,\n avg_ckpts=args.avg_ckpts\n )\n\ndef create_or_load_hparams(\n out_dir, default_hparams):\n \"\"\"\n Create hparams or load hparams from out_dir.\n \"\"\"\n\n hparams = utils.load_hparams(out_dir)\n if not hparams:\n hparams = default_hparams\n\n hparams.add_hparam(\"best_bleu\", 0)\n best_bleu_dir = os.path.join(out_dir, \"best_bleu\")\n hparams.add_hparam(\"best_bleu_dir\", best_bleu_dir)\n os.makedirs(best_bleu_dir)\n hparams.add_hparam(\"avg_best_bleu\", 0)\n best_bleu_dir = os.path.join(hparams.out_dir, \"avg_best_bleu\")\n hparams.add_hparam(\"avg_best_bleu_dir\", os.path.join(hparams.out_dir, \"avg_best_bleu\"))\n os.makedirs(best_bleu_dir)\n\n # Set num_train_steps\n train_src_file = \"%s.%s\" % (hparams.train_prefix, hparams.src)\n train_tgt_file = \"%s.%s\" % (hparams.train_prefix, hparams.tgt)\n with open(train_src_file, 'r', encoding='utf-8') as f:\n train_src_steps = len(f.readlines())\n with open(train_tgt_file, 'r', encoding='utf-8') as f:\n train_tgt_steps = len(f.readlines())\n hparams.add_hparam(\"num_train_steps\", min([train_src_steps, train_tgt_steps]) * hparams.epochs)\n\n # Set encoder/decoder layers\n hparams.add_hparam(\"num_encoder_layers\", hparams.num_layers)\n hparams.add_hparam(\"num_decoder_layers\", hparams.num_layers)\n\n # Set residual layers\n num_encoder_residual_layers = 0\n num_decoder_residual_layers = 0\n if hparams.num_encoder_layers > 1:\n num_encoder_residual_layers = hparams.num_encoder_layers - 1\n if hparams.num_decoder_layers > 1:\n num_decoder_residual_layers = 
hparams.num_decoder_layers - 1\n\n    # The first unidirectional layer (after the bi-directional layer) in\n    # the GNMT encoder can't have residual connection because the input is\n    # the concatenation of fw_cell and bw_cell's outputs.\n    num_encoder_residual_layers = hparams.num_encoder_layers - 2\n\n    # Compatible for GNMT models\n    if hparams.num_encoder_layers == hparams.num_decoder_layers:\n        num_decoder_residual_layers = num_encoder_residual_layers\n    \n    hparams.add_hparam(\"num_encoder_residual_layers\", num_encoder_residual_layers)\n    hparams.add_hparam(\"num_decoder_residual_layers\", num_decoder_residual_layers)\n\n    # Vocab\n    # Get vocab file names first\n    if hparams.vocab_prefix:\n        src_vocab_file = hparams.vocab_prefix + \".\" + hparams.src\n        tgt_vocab_file = hparams.vocab_prefix + \".\" + hparams.tgt\n    else:\n        raise ValueError(\"hparams.vocab_prefix must be provided.\")\n    # Source vocab\n    src_vocab_size, src_vocab_file = vocab_utils.check_vocab(\n        src_vocab_file,\n        hparams.out_dir,\n        sos=hparams.sos,\n        eos=hparams.eos,\n        unk=vocab_utils.UNK)\n    # Target vocab\n    if hparams.share_vocab:\n        utils.log(\"Using source vocab for target\")\n        tgt_vocab_file = src_vocab_file\n        tgt_vocab_size = src_vocab_size\n    else:\n        tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(\n            tgt_vocab_file,\n            hparams.out_dir,\n            sos=hparams.sos,\n            eos=hparams.eos,\n            unk=vocab_utils.UNK)\n    hparams.add_hparam(\"src_vocab_size\", src_vocab_size)\n    hparams.add_hparam(\"tgt_vocab_size\", tgt_vocab_size)\n    hparams.add_hparam(\"src_vocab_file\", src_vocab_file)\n    hparams.add_hparam(\"tgt_vocab_file\", tgt_vocab_file)\n\n    # Pretrained Embeddings:\n    hparams.add_hparam(\"src_embed_file\", \"\")\n    hparams.add_hparam(\"tgt_embed_file\", \"\")\n    if hparams.embed_prefix:\n        src_embed_file = hparams.embed_prefix + \".\" + hparams.src\n        tgt_embed_file = hparams.embed_prefix + \".\" + hparams.tgt\n        if os.path.exists(src_embed_file):\n            hparams.src_embed_file = src_embed_file\n        if os.path.exists(tgt_embed_file):\n            hparams.tgt_embed_file = tgt_embed_file\n\n\n    # Save HParams\n    utils.save_hparams(out_dir, hparams)\n\n    return hparams\n\ndef run(args):\n    if not os.path.exists(args.out_dir):\n        os.makedirs(args.out_dir)\n\n    logger = logging.getLogger(\"nmt_zh\")\n    logger.setLevel(logging.INFO)\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    file_handler = logging.FileHandler(os.path.join(args.out_dir, \"log\"))\n    file_handler.setLevel(logging.INFO)\n    file_handler.setFormatter(formatter)\n    logger.addHandler(file_handler)\n\n    default_hparams = create_hparams(args)\n    # Load hparams.\n    hparams = create_or_load_hparams(\n        default_hparams.out_dir, default_hparams)\n\n    utils.log('Running with hparams : {}'.format(hparams))\n\n    random_seed = hparams.random_seed\n    if random_seed is not None and random_seed > 0:\n        utils.log('Set random seed to {}'.format(random_seed))\n        random.seed(random_seed)\n        np.random.seed(random_seed) \n        tf.set_random_seed(random_seed)\n\n    if hparams.inference_input_file:\n        utils.log('Inferring ...')\n        # infer\n        trans_file = hparams.inference_output_file\n        ckpt = hparams.ckpt\n        if not ckpt:\n            ckpt = tf.train.latest_checkpoint(hparams.out_dir)\n        utils.log('Use checkpoint: {}'.format(ckpt))\n        utils.log('Start infer sentence in {}, output saved to {} ...'.format(\n            hparams.inference_input_file, trans_file))\n        infer.infer(ckpt, hparams.inference_input_file, trans_file, hparams)\n\n        # eval\n        ref_file = hparams.inference_ref_file\n        if ref_file and os.path.exists(trans_file):\n            
utils.log('Evaluating infer output with reference in {} ...'.format(\n ref_file))\n score = evaluation_utils.evaluate(\n ref_file,\n trans_file,\n 'BLEU')\n utils.log(\"BLEU: %.1f\" % (score, ))\n else:\n utils.log('Training ...')\n train.train(hparams)\n\n\nif __name__ == '__main__':\n args = parse_args()\n run(args)\n", "repo_name": "zsy23/chinese_nlp", "sub_path": "machine_translation/nmt_zh/nmt_zh.py", "file_name": "nmt_zh.py", "file_ext": "py", "file_size_in_byte": 17399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 26, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 4, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.contrib.training.HParams", "line_number": 183, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 183, "usage_type": "attribute"}, {"api_name": "utils.vocab_utils.SOS", "line_number": 234, "usage_type": "attribute"}, {"api_name": "utils.vocab_utils", "line_number": 234, "usage_type": "name"}, {"api_name": "utils.vocab_utils.EOS", "line_number": 235, "usage_type": "attribute"}, {"api_name": "utils.vocab_utils", "line_number": 235, "usage_type": "name"}, {"api_name": "utils.misc_utils.load_hparams", "line_number": 254, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 254, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path", "line_number": 263, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path", "line_number": 264, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 265, "usage_type": "call"}, {"api_name": "utils.vocab_utils.check_vocab", "line_number": 308, "usage_type": "call"}, {"api_name": "utils.vocab_utils", "line_number": 308, "usage_type": "name"}, {"api_name": "utils.vocab_utils.UNK", "line_number": 313, "usage_type": "attribute"}, {"api_name": "utils.vocab_utils", "line_number": 313, "usage_type": "name"}, {"api_name": "utils.misc_utils.log", "line_number": 316, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.vocab_utils.check_vocab", "line_number": 320, "usage_type": "call"}, {"api_name": "utils.vocab_utils", "line_number": 320, "usage_type": "name"}, {"api_name": "utils.vocab_utils.UNK", "line_number": 325, "usage_type": "attribute"}, {"api_name": "utils.vocab_utils", "line_number": 325, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 337, "usage_type": "call"}, {"api_name": "os.path", "line_number": 337, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "utils.misc_utils.save_hparams", "line_number": 344, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 344, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 349, "usage_type": "call"}, {"api_name": "os.path", "line_number": 349, "usage_type": 
"attribute"}, {"api_name": "os.makedirs", "line_number": 350, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 352, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 353, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 354, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path", "line_number": 355, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 356, "usage_type": "attribute"}, {"api_name": "utils.misc_utils.log", "line_number": 365, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 365, "usage_type": "name"}, {"api_name": "utils.misc_utils.log", "line_number": 369, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 369, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 371, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 372, "usage_type": "call"}, {"api_name": "utils.misc_utils.log", "line_number": 375, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 375, "usage_type": "name"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 380, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 380, "usage_type": "attribute"}, {"api_name": "utils.misc_utils.log", "line_number": 381, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 381, "usage_type": "name"}, {"api_name": "utils.misc_utils.log", "line_number": 382, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 382, "usage_type": "name"}, {"api_name": "infer.infer", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 388, "usage_type": "call"}, {"api_name": "os.path", "line_number": 388, "usage_type": "attribute"}, {"api_name": "utils.misc_utils.log", "line_number": 389, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 389, "usage_type": "name"}, {"api_name": "utils.evaluation_utils.evaluate", "line_number": 391, "usage_type": "call"}, {"api_name": "utils.evaluation_utils", "line_number": 391, "usage_type": "name"}, {"api_name": "utils.misc_utils.log", "line_number": 395, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 395, "usage_type": "name"}, {"api_name": "utils.misc_utils.log", "line_number": 397, "usage_type": "call"}, {"api_name": "utils.misc_utils", "line_number": 397, "usage_type": "name"}, {"api_name": "train.train", "line_number": 398, "usage_type": "call"}]} +{"seq_id": "3357889181", "text": "from django import template\nfrom django.urls import reverse\nfrom django.template import Context\nfrom django.db import connection\n\nfrom menu_app.models import MenuItem\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef draw_menu(context: Context, menu_name: str) -> str:\n \"\"\"\n template tag для вывода дерева меню\n :param context: (Context)\n :param menu_name: (str) название меню, которое необходимо загрузить\n :return:\n \"\"\"\n request = context[\"request\"]\n current_url = request.path\n\n menu_items = MenuItem.objects.filter(menu__title=menu_name)\n menu_items_list = [item for item in menu_items.values()]\n menu_tree = build_tree_menu(menu_items_list[:], 
current_url)\n\n    print(\"Number of DB queries: \", len(connection.queries))\n\n    return _render_menu(menu_tree, current_url)\n\n\ndef _render_menu(menu_items: list[dict], current_url: str) -> str:\n    \"\"\"\n    Helper function that generates the HTML for rendering the menu\n    :param menu_items: (list[dict]) list of menu items (menu tree)\n    :param current_url: (str) url of the open page\n    :return: (str) text in html format\n    \"\"\"\n    html = \"<ul>\"\n    for item in menu_items:\n        html += \"<li>\"\n        item_url = _get_menu_item_url(item)\n\n        if current_url == item_url:\n            html += (\n                f'<a style=\"text-decoration: none; '\n                f\"font-family: monospace; \"\n                f\"border-top; \"\n                f\"border-width: thin; \"\n                f\"border-style: dotted; \"\n                f'color: red\" '\n                f'href=\"{item_url} \">'\n                f\"{item['name']}\"\n                f\"</a>\"\n            )\n        else:\n            html += (\n                f'<a style=\"text-decoration: none; '\n                f'font-family: monospace;\" '\n                f'href=\"{item_url}\">{item[\"name\"]}</a>'\n            )\n\n        if \"sub_menu\" in item:\n            html += _render_menu(item[\"sub_menu\"], current_url)\n        html += \"</li>\"\n\n    html += \"</ul>\"\n\n    return html\n\n\ndef _get_menu_item_url(menu_item: dict) -> str:\n    \"\"\"\n    Helper function that returns the address the menu item points to\n    :param menu_item: (dict) menu item\n    :return: (str) url address\n    \"\"\"\n    if menu_item[\"url\"].startswith(\"/\"):\n        return menu_item[\"url\"]\n    else:\n        return reverse(menu_item[\"url\"])\n\n\ndef build_tree_menu(\n    data_list: list[dict], current_url: str, parent_id: int = None\n) -> list[dict]:\n    \"\"\"\n    Builds the menu tree down to the current item (determined by the address of the open page)\n    :param data_list: (list[dict]) list of menu items\n    :param current_url: (str) address of the open page\n    :param parent_id: (int) identifier of the parent menu level\n    :return: (list[dict]) menu tree\n    \"\"\"\n    menu_tree = list()\n    for item in data_list:\n        if item[\"parent_id\"] == parent_id:\n            children = build_tree_menu(data_list, current_url, item[\"id\"])\n            if children and (item[\"url\"] in current_url):\n                item[\"sub_menu\"] = children\n            menu_tree.append(item)\n    return menu_tree\n", "repo_name": "AlexMuller45/UpTrader", "sub_path": "menu_app/templatetags/menu_tags.py", "file_name": "menu_tags.py", "file_ext": "py", "file_size_in_byte": 3564, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "django.template.Library", "line_number": 8, "usage_type": "call"}, {"api_name": "django.template", "line_number": 8, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 12, "usage_type": "name"}, {"api_name": "menu_app.models.MenuItem.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "menu_app.models.MenuItem.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "menu_app.models.MenuItem", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.connection.queries", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.connection", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "2414471259", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ## question 1\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# \n\n# In[2]:\n\n\nimport numpy as np\ndef VectorLength(x, y):\n    v = np.sqrt(x*x + y*y)\n    return v\n\n\n\n# In[3]:\n\n\nprint(VectorLength(5,6))\n\n\n# In[4]:\n\n\nprint(VectorLength(-1,5))\n\n\n# 
In[5]:\n\n\nr = np.sin(1)\ny = np.round(r, 3)\nprint(y)\nprint(\"sin(1) =\" , y)\n\n\n# ## Question 2\n\n# In[6]:\n\n\n# function to check if a number is prime\ndef IsPrime(n):\n    i = 2\n    while i < np.sqrt(n):\n        # For each i check if it divides n\n        if(n % i ==0):\n            return 0\n        i += 1\n    # if no divisors are found, the number is prime\n    return 1\n    \n\n\n# In[7]:\n\n\nprint(IsPrime(1))\n\n\n# In[8]:\n\n\nprint(IsPrime(2))\n\n\n# In[9]:\n\n\nprint(IsPrime(3))\n\n\n# In[10]:\n\n\nprint(IsPrime(4))\n\n\n# In[11]:\n\n\nprint(IsPrime(5))\n\n\n# In[12]:\n\n\nprint(IsPrime(6))\n\n\n# In[13]:\n\n\nprint(IsPrime(7))\n\n\n# In[14]:\n\n\nprint(IsPrime(8))\n\n\n# In[15]:\n\n\nprint(IsPrime(9))\n\n\n# In[16]:\n\n\nprint(IsPrime(10))\n\n\n# it does not accurately check if a number is prime, as 2 and 3 are prime; it also returns that 9 is prime and it's not.\n\n# In[17]:\n\n\n# function to check if a number is prime\ndef IsPrime1(n):\n    if n == 1:\n        return 0\n    i = 2\n    while i <= np.sqrt(n):\n        # For each i check if it divides n\n        if(n % i ==0):\n            return 0\n        i += 1\n    # if no divisors are found, the number is prime\n    return 1\n\n\n# In[18]:\n\n\nprint(IsPrime1(1))\n\n\n# In[19]:\n\n\nprint(IsPrime1(4))\n\n\n# In[31]:\n\n\nprint(IsPrime1(9))\n\n\n# I added a case to fix it\n\n# In[20]:\n\n\ndef countprime(n):\n    i = 1\n    count = 0\n    while i < n:\n        if IsPrime1(i) == 1:\n            count = count + 1\n        i = i+ 1 \n    return count \n\n    \n\n\n# In[21]:\n\n\ncountprime(10)\n\n\n# In[22]:\n\n\ncountprime(1000)\n\n\n# # question 2G\n\n# In[23]:\n\n\nx = np.arange(0,40,1)\nPi = np.zeros(40)\ni=0\nwhile i < np.size(Pi):\n    Pi[i] = countprime(i)\n    i += 1\n\n\n# In[24]:\n\n\nplt.plot(x, Pi, 'ro')\nplt.xlabel(\"x\")\nplt.ylabel(\"Pi(x)\")\nplt.grid(True)\n\n\n# ## Question 3\n\n# In[25]:\n\n\ndata = np.loadtxt(\"PowerLawOrExponential (4).txt\")\n\n\n# In[26]:\n\n\nxdata = data[:,0]\nxdata\n\n\n# In[27]:\n\n\nydata = data[:,1]\nydata\n\n\n# In[28]:\n\n\nplt.plot((xdata), np.log(ydata))\nplt.grid(True)\nplt.title(\"log plot\")\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\n\n\n# In[29]:\n\n\nplt.plot(np.log(xdata), np.log(ydata))\nplt.grid(True)\nplt.title(\"log plot\")\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\n\n\n# yes, the data is discrete \n\n# In[30]:\n\n\nn=4/2\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "aislingmcgloin/ACM20030", "sub_path": "ACM20030-Assignment-1 (1).py", "file_name": "ACM20030-Assignment-1 (1).py", "file_ext": "py", "file_size_in_byte": 2489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 208, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 248, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}]} +{"seq_id": "17344259102", "text": "\n\nimport sys\nimport pickle\npickle_in = open(\"PCFG\",\"rb\")\ngrammar = pickle.load(pickle_in)\n\n\n# CKY Algo\n\nimport sys\nfrom nltk.parse import ViterbiParser\nfrom nltk import tokenize\nfrom nltk.parse import pchart\n\n\n\n\n# Tokenize the sentence.\ntokens=sys.argv[1:]\n\nparser = ViterbiParser(grammar)\n\n\n# Replacing unknown words with UNK....\n\nreplace_with_UNK_token = []\nfor i,item in enumerate(tokens):\n try:\n grammar.check_coverage([item])\n except:\n replace_with_UNK_token.append(tokens[i])\n tokens[i] = 'UNK'\n\n\ntrees = parser.parse_all(tokens)\nfor tree in trees:\n pass\n\n\n\nUNK_str = tree.__str__()\n\noutput_parse= UNK_str\nfor i in replace_with_UNK_token:\n output_parse = output_parse.replace(\"UNK\",i,1)\n \nprint(output_parse)\n\n", "repo_name": "sumangpt9/NLU-Assignment-3", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 752, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "pickle.load", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "nltk.parse.ViterbiParser", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "31581191633", "text": "from src import spoq\nimport time\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom src import parser\nimport argparse\n\n\n\nif __name__ == \"__main__\" :\n\n argsparser = 
argparse.ArgumentParser()\n\n    data = parser.DataGenerator(500, 20, 5)  # default dataset; replaced below once the arguments are parsed\n    argsparser.add_argument(\"-s\", \"--simulation\", action='store_true', help=\"If set, the algorithm runs on simulated data\")\n    argsparser.add_argument(\"-nsample\", \"--n\", default=500, type=int, help=\"The length of x\")\n    argsparser.add_argument(\"-npeak\", \"--np\", default=20, type=int, help=\"The number of nonzero values in x\")\n    argsparser.add_argument(\"-peakw\", \"--pw\", default=5, type=int, help=\"The peak width\")\n    args = argsparser.parse_args()\n\n    if args.simulation:\n        data = parser.DataGenerator(args.n, args.np, args.pw)\n    else:\n        data = parser.DataReader()\n\n    # Display results\n    solver = spoq.SPOQ(x=data.xtrue, y=data.y, K=data.K, sigma=data.sigma, noise=data.noise)\n    print(\"Running TR-VMFB algorithm on SPOQ penalty with p = {0!s} and q = {1!s}\\n\".format(solver.p, solver.q))\n    start_time = time.time()\n    solver.run()\n    end_time = time.time()\n    print(\"Reconstruction in {0!s} iterations\\n\".format(len(solver.Time)))\n    print(\"SNR = {0!s}\\n\".format(-10*math.log10(np.sum((solver.xtrue-solver.xk)**2)/np.sum(solver.xtrue**2))))\n    print(\"Reconstruction time is {0!s} s.\".format(np.sum(solver.Time)))\n\n    # Plot results\n    x_time = np.cumsum(solver.Time)\n    x_time = np.insert(x_time, 0, 0)\n    fig, ax = plt.subplots()\n    ax.plot(solver.xtrue, \"ro\", label=\"Original signal\")\n    ax.plot(solver.xk, \"bo\", label=\"Estimated signal\")\n    ax.legend()\n    ax.set_title(\"Reconstruction results\")\n\n    fig2, ax2 = plt.subplots()\n    ax2.plot(x_time, solver.mysnr, '-k', label=\"TR-VMFB\")\n    ax2.set_title(\"Algorithm convergence\")\n    ax2.set_xlabel(\"Time (s)\")\n    ax2.set_ylabel(\"SNR (dB)\")\n    ax2.legend()\n    plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "anaslaaroussi1/spoq", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1952, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "src.parser.DataGenerator", "line_number": 15, "usage_type": "call"}, {"api_name": "src.parser", "line_number": 15, "usage_type": "name"}, {"api_name": "src.parser.DataGenerator", "line_number": 23, "usage_type": "call"}, {"api_name": "src.parser", "line_number": 23, "usage_type": "name"}, {"api_name": "src.parser.DataReader", "line_number": 25, "usage_type": "call"}, {"api_name": "src.parser", "line_number": 25, "usage_type": "name"}, {"api_name": "src.spoq.SPOQ", "line_number": 28, "usage_type": "call"}, {"api_name": "src.spoq", "line_number": 28, "usage_type": "name"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "time.time", "line_number": 32, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "1402498647", "text": "import tensorflow as tf\nimport numpy as np\nfrom keras.models import Model\nfrom keras.layers import Dense, BatchNormalization\nfrom keras import Sequential\nfrom tensorflow.keras.utils import to_categorical\n\nclass AutoEncoder(Model):\n def __init__(self, M, num_channels, rate, Eb_N0):\n super(AutoEncoder, self).__init__()\n self.variance = np.sqrt(1/(2*rate*Eb_N0))\n self.n_channels = num_channels\n\n self.encoder = Sequential([\n Dense(M, activation='relu', name=\"encoder_layer1\"),\n Dense(num_channels, activation='linear', name=\"encoder_layer2\"),\n ])\n\n self.decoder = Sequential([\n Dense(M, activation='relu', name=\"decoder_layer1\"),\n Dense(M, activation='softmax', name=\"decoder_layer2\"),\n ])\n\n def call(self, inputs):\n one_hat_vector = to_categorical(inputs, num_classes=16)\n transmitted_signal = self.encoder(one_hat_vector)\n transmitted_signal = np.sqrt(self.n_channels)*tf.math.l2_normalize(transmitted_signal, axis=-1)\n\n noise = self.variance * np.random.randn(transmitted_signal.shape[0], self.n_channels)\n channel_output = transmitted_signal + noise\n\n output = self.decoder(channel_output)\n\n return output", "repo_name": "JUNGJAEHOON-98/An-Introduction-to-Deep-Learning-for-the-Physical-Layer", "sub_path": "models/autoencoder.py", "file_name": "autoencoder.py", "file_ext": "py", "file_size_in_byte": 1258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "97", "api": [{"api_name": "keras.models.Model", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.Sequential", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.Sequential", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.math.l2_normalize", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "26907569639", "text": "\"\"\"\nThis module defines a model element to detect fixed strings from a list of words.\n\nThis program is free software: you can redistribute it and/or modify it under\nthe terms of the GNU General Public License as published by the Free Software\nFoundation, either version 3 of the License, or (at your option) any later\nversion.\nThis program is distributed in the hope that it will be useful, but WITHOUT\nANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\nYou should have received a copy of the GNU General Public License along with\nthis program. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nimport logging\nfrom aminer import AMinerConfig\nfrom aminer.parsing import ModelElementInterface\nfrom aminer.parsing.MatchElement import MatchElement\n\n\nclass FixedWordlistDataModelElement(ModelElementInterface):\n \"\"\"\n This class defines a model element to detect fixed strings from a list of words.\n The match will return the position of the word in the search list, thus the sorting of the list is important. Apart from that, the\n wordlist must not contain any words, that are identical to the beginning of words later in the list. In that case, the longer match\n could never be detected.\n \"\"\"\n\n def __init__(self, path_id, wordlist):\n \"\"\"\n Create the model element.\n @param wordlist the list of words to search for. If it does not fulfill the sorting criteria mentioned in the class documentation,\n an Exception will be raised.\n \"\"\"\n self.path_id = path_id\n self.wordlist = wordlist\n for test_pos, ref_word in enumerate(wordlist):\n for test_word in wordlist[test_pos + 1:]:\n if test_word.startswith(ref_word):\n msg = 'Word %s would be shadowed by word %s at lower position' % (repr(test_word), repr(ref_word))\n logging.getLogger(AMinerConfig.DEBUG_LOG_NAME).error(msg)\n raise Exception(msg)\n\n def get_id(self):\n \"\"\"Get the element ID.\"\"\"\n return self.path_id\n\n def get_child_elements(self):\n \"\"\"\n Get all possible child model elements of this element.\n @return None as there are no children of this element.\n \"\"\"\n return None\n\n def get_match_element(self, path, match_context):\n \"\"\"@return None when there is no match, MatchElement otherwise.\"\"\"\n data = match_context.match_data\n match_data = None\n word_pos = 0\n for word in self.wordlist:\n if data.startswith(word):\n match_data = word\n break\n word_pos += 1\n\n if match_data is None:\n return None\n\n match_context.update(match_data)\n return MatchElement(\"%s/%s\" % (path, self.path_id), match_data, word_pos, None)\n", "repo_name": "sebastinas/logdata-anomaly-miner", "sub_path": "source/root/usr/lib/logdata-anomaly-miner/aminer/parsing/FixedWordlistDataModelElement.py", "file_name": "FixedWordlistDataModelElement.py", "file_ext": "py", "file_size_in_byte": 2900, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "97", "api": [{"api_name": "aminer.parsing.ModelElementInterface", "line_number": 21, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 41, "usage_type": "call"}, {"api_name": "aminer.AMinerConfig.DEBUG_LOG_NAME", "line_number": 41, "usage_type": "attribute"}, {"api_name": "aminer.AMinerConfig", "line_number": 41, "usage_type": "name"}, {"api_name": "aminer.parsing.MatchElement.MatchElement", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "6203398142", "text": "from pathlib import Path\nimport time\nimport configparser\n\nWHOSON_SECTION = \"WHOSON\"\n\nclass Configuration:\n\n def __init__(self):\n self.config_filename = \".whoson.ini\"\n self.config_path = Path.home().joinpath(self.config_filename)\n\n self.config = configparser.ConfigParser()\n if not self.config_path.exists():\n print(f\"No config file, creating one at {self.config_path.absolute()}\")\n\n self.config.add_section(WHOSON_SECTION)\n with self.config_path.open('w') as f:\n self.config.write(f)\n \n with self.config_path.open('r') as f:\n self.config.read_file(f)\n\n def client_info(self, client_info: dict = None) -> dict:\n return {\n \"client_id\": 
self.config.get(WHOSON_SECTION,\"client_id\"),\n \"client_secret\": self.config.get(WHOSON_SECTION,\"client_secret\")}\n\n def client_id(self, client_id: str):\n self.config.set(WHOSON_SECTION,\"client_id\", client_id)\n self.save_config()\n\n def client_secret(self, client_secret: str):\n self.config.set(WHOSON_SECTION,\"client_secret\", client_secret)\n self.save_config()\n\n def username(self, username: str = None) -> str:\n if username is None:\n return self.config.get(WHOSON_SECTION,\"username\")\n\n self.config.set(WHOSON_SECTION,\"username\", username)\n self.save_config()\n \n def access_token(self, access_token: str = None, exp: int = None) -> str:\n if access_token is None:\n exp = int(self.config.get(WHOSON_SECTION,\"exp\"))\n if time.time() > exp:\n raise ValueError\n\n return self.config.get(WHOSON_SECTION,\"access_token\")\n\n self.config.set(WHOSON_SECTION,\"exp\", str(exp))\n self.config.set(WHOSON_SECTION,\"access_token\", access_token)\n self.save_config()\n\n def save_config(self):\n with open(self.config_path, 'w') as f:\n self.config.write(f)\n ", "repo_name": "vkstrm/whoson", "sub_path": "configuration.py", "file_name": "configuration.py", "file_ext": "py", "file_size_in_byte": 1984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "pathlib.Path.home", "line_number": 11, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 11, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "12898127428", "text": "import os\nimport random\nimport json\nimport asyncio\nimport math\nimport discord\nfrom discord.ext import commands\n\nfrom bot.data import Data\n\n\nclass Fun(commands.Cog):\n def __init__(self, bot, theme_color):\n self.bot = bot\n self.theme_color = theme_color\n self.currently_fighting = []\n self.deleted_msgs = {}\n self.edited_msgs = {}\n self.snipe_limit = 15\n\n data_dir = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))),\n \"data\",\n )\n\n with open(\n os.path.join(data_dir, \"beanlations.json\"), \"r\"\n ) as beanlations_file:\n self.beanlations = json.load(beanlations_file)\n\n @commands.Cog.listener()\n async def on_message_delete(self, message: discord.Message):\n ch_id_str = str(message.channel.id)\n\n if not message.author.bot:\n if ch_id_str not in self.deleted_msgs:\n self.deleted_msgs[ch_id_str] = []\n\n self.deleted_msgs[ch_id_str].append(message)\n\n if len(self.deleted_msgs[ch_id_str]) > self.snipe_limit:\n self.deleted_msgs[ch_id_str].pop(0)\n\n @commands.Cog.listener()\n async def on_message_edit(self, before, after):\n ch_id_str = str(before.channel.id)\n\n if not before.author.bot:\n if ch_id_str not in self.edited_msgs:\n self.edited_msgs[ch_id_str] = []\n\n self.edited_msgs[ch_id_str].append((before, after))\n\n if len(self.edited_msgs[ch_id_str]) > self.snipe_limit:\n self.edited_msgs[ch_id_str].pop(0)\n\n @commands.command(\n name=\"gamble\",\n aliases=[\"gam\"],\n help=\"Gamble some money to see if you earn more than you spend\",\n )\n async def gamble(self, ctx, amount: int):\n Data.check_user_entry(ctx.author)\n\n Data.c.execute(\n \"SELECT wallet FROM users WHERE id = :user_id\",\n {\"user_id\": ctx.author.id},\n )\n wallet = Data.c.fetchone()[0]\n\n if amount > wallet:\n amount_needed = amount - wallet\n await ctx.send(\n f\"You don't have enough beans for that. 
You need {amount_needed} more beans.\"\n )\n return\n\n player_roll = random.randint(1, 6)\n dealer_roll = random.randint(1, 6)\n\n if player_roll > dealer_roll:\n amount_won = math.ceil((player_roll - dealer_roll) / 6 * amount)\n elif player_roll < dealer_roll:\n amount_won = -amount\n else:\n amount_won = 0\n\n Data.c.execute(\n \"UPDATE users SET wallet = :new_wallet WHERE id = :user_id\",\n {\"new_wallet\": wallet + amount_won, \"user_id\": ctx.author.id},\n )\n Data.conn.commit()\n\n gamble_embed = discord.Embed(title=\"Gambling Results\")\n\n gamble_embed.add_field(name=\"You rolled\", value=str(player_roll))\n gamble_embed.add_field(name=\"Dealer rolled\", value=str(dealer_roll))\n\n if player_roll > dealer_roll:\n gamble_embed.color = discord.Color.green()\n gamble_embed.set_footer(text=f\"You won {amount_won} beans!\")\n elif player_roll < dealer_roll:\n gamble_embed.color = discord.Color.red()\n gamble_embed.set_footer(text=f\"You lost {abs(amount_won)} beans!\")\n else:\n gamble_embed.color = discord.Color.gold()\n gamble_embed.set_footer(text=\"You won nothing!\")\n\n await ctx.send(embed=gamble_embed)\n\n @commands.command(name=\"fight\", help=\"Pick a fight with someone\")\n async def fight(self, ctx, user: discord.User):\n if ctx.author.id == user.id:\n await ctx.send(\"You can't do that to yourself, smh...\")\n return\n\n if ctx.author in self.currently_fighting:\n await ctx.send(\"You are already fighting someone at the moment...\")\n return\n\n elif user in self.currently_fighting:\n await ctx.send(\n f\"**{user.display_name}** is already fighting someone at the moment...\"\n )\n return\n\n p1_health = 100\n p2_health = 100\n p1_name = ctx.author.display_name\n p2_name = user.display_name\n\n self.currently_fighting.append(ctx.author)\n self.currently_fighting.append(user)\n\n Data.check_user_entry(ctx.author)\n Data.check_user_entry(user)\n\n # Load player powerups\n Data.c.execute(\n \"SELECT powerups FROM users WHERE id = :user_id\",\n {\"user_id\": ctx.author.id},\n )\n p1_powerups = json.loads(Data.c.fetchone()[0])\n Data.c.execute(\n \"SELECT powerups FROM users WHERE id = :user_id\",\n {\"user_id\": user.id},\n )\n p2_powerups = json.loads(Data.c.fetchone()[0])\n\n def check_p1(message):\n return (\n message.author == ctx.author and message.channel == ctx.channel\n )\n\n def check_p2(message):\n return message.author == user and message.channel == ctx.channel\n\n async def end():\n self.currently_fighting.remove(ctx.author)\n self.currently_fighting.remove(user)\n\n if p1_health != 100 and p2_health != 100:\n if len(p1_powerups) > 0:\n Data.c.execute(\n \"UPDATE users SET powerups = :new_powerups WHERE id = :user_id\",\n {\"new_powerups\": \"{}\", \"user_id\": ctx.author.id},\n )\n Data.conn.commit()\n await ctx.send(\n f\"{ctx.author.mention}, you have used up your active powerups.\"\n )\n\n if len(p2_powerups) > 0:\n Data.c.execute(\n \"UPDATE users SET powerups = :new_powerups WHERE id = :user_id\",\n {\"new_powerups\": \"{}\", \"user_id\": user.id},\n )\n Data.conn.commit()\n await ctx.send(\n f\"{user.mention}, you have used up your active powerups.\"\n )\n\n await ctx.send(\n f\"{ctx.author.mention} wants to fight {user.mention}. Let's see how this goes...\"\n )\n\n while True:\n # Player 2 turn\n p2_resp_valid = False\n\n try:\n while not p2_resp_valid:\n await ctx.send(\n f\"{user.mention}, it's your turn! 
What will you do?\\n`punch`, `defend`, `end`\"\n )\n\n p2_response = (\n await self.bot.wait_for(\n \"message\", check=check_p2, timeout=30\n )\n ).content\n\n if p2_response == \"punch\":\n damage = random.randint(10, 45)\n\n try:\n damage += p2_powerups[\"damage_increase\"]\n except KeyError:\n pass\n\n p1_health -= damage\n p2_resp_valid = True\n\n await ctx.send(\n f\"**{p2_name}** bazooka punched **{p1_name}** and did **{damage}** damage! wHoOaA...\"\n )\n\n elif p2_response == \"defend\":\n heal = random.randint(5, 30)\n p2_health += heal\n p2_resp_valid = True\n\n await ctx.send(\n f\"**{p2_name}** defended and regained **{heal}** health! Proteccshun...\"\n )\n\n elif p2_response == \"end\":\n await ctx.send(\n f\"**{p2_name}** chickened out, spam noob in the chat!\"\n )\n await end()\n return\n\n else:\n await ctx.send(\"Invalid response!\")\n\n except asyncio.TimeoutError:\n await ctx.send(\n f\"**{p2_name}** didn't respond in time what a noob...\"\n )\n await end()\n return\n\n if p1_health <= 0:\n await ctx.send(f\"Wow **{p1_name}** just died. Git gud noooob!\")\n await end()\n return\n else:\n await ctx.send(\n f\"**{p1_name}** is now left with **{p1_health}** health.\"\n )\n\n # Player 1 turn\n p1_resp_valid = False\n\n try:\n while not p1_resp_valid:\n await ctx.send(\n f\"{ctx.author.mention}, it's your turn! What will you do?\\n`punch`, `defend`, `end`\"\n )\n\n p1_response = (\n await self.bot.wait_for(\n \"message\", check=check_p1, timeout=30\n )\n ).content\n\n if p1_response == \"punch\":\n damage = random.randint(10, 45)\n\n try:\n damage += p1_powerups[\"damage_increase\"]\n except KeyError:\n pass\n\n p2_health -= damage\n p1_resp_valid = True\n\n await ctx.send(\n f\"**{p1_name}** bazooka punched **{p2_name}** and did **{damage}** damage! wHoOaA...\"\n )\n\n elif p1_response == \"defend\":\n heal = random.randint(5, 30)\n p1_health += heal\n p1_resp_valid = True\n\n await ctx.send(\n f\"**{p1_name}** defended and regained **{heal}** health! Proteccshun...\"\n )\n\n elif p1_response == \"end\":\n await ctx.send(\n f\"**{p1_name}** chickened out, spam noob in the chat!\"\n )\n await end()\n return\n\n else:\n await ctx.send(\"Invalid response!\")\n\n except asyncio.TimeoutError:\n await ctx.send(\n f\"**{p1_name}** didn't respond in time what a noob...\"\n )\n await end()\n return\n\n if p2_health <= 0:\n await ctx.send(f\"Wow **{p2_name}** just died. 
Git gud noooob!\")\n await end()\n return\n else:\n await ctx.send(\n f\"**{p2_name}** is now left with **{p2_health}** health.\"\n )\n\n @commands.command(\n name=\"powerups\",\n aliases=[\"power\", \"pu\"],\n help=\"See your currently active powerups\",\n )\n async def powerups(self, ctx):\n Data.check_user_entry(ctx.author)\n\n Data.c.execute(\n \"SELECT powerups FROM users WHERE id = :user_id\",\n {\"user_id\": ctx.author.id},\n )\n powerups = json.loads(Data.c.fetchone()[0])\n powerups_embed = discord.Embed(\n title=f\"{ctx.author.display_name}'s Active Powerups\",\n color=self.theme_color,\n )\n\n for powerup in powerups:\n powerup_name = \" \".join(powerup.split(\"_\")).title()\n powerups_embed.add_field(\n name=powerup_name, value=powerups[powerup]\n )\n\n await ctx.send(embed=powerups_embed)\n\n @commands.command(\n name=\"pray\", help=\"Pray to the Bean Gods by reciting the Beanlations\"\n )\n async def pray(self, ctx, *, prayer=None):\n if prayer is None:\n prayer = random.choice(self.beanlations)\n\n await ctx.send(\n f\"**{ctx.author.display_name}** recites a prayer:\\n*{prayer}*\"\n )\n\n @commands.command(\n name=\"snipe\", aliases=[\"sn\"], help=\"See a recently deleted message\"\n )\n async def snipe(self, ctx, limit: int = 1):\n if limit > self.snipe_limit:\n await ctx.send(f\"Maximum snipe limit is {self.snipe_limit}\")\n return\n\n msgs = self.deleted_msgs[str(ctx.channel.id)][::-1][:limit]\n snipe_embed = discord.Embed(\n title=\"Message Snipe\", color=self.theme_color\n )\n\n for msg in msgs:\n snipe_embed.add_field(\n name=msg.author.display_name, value=msg.content, inline=False\n )\n\n await ctx.send(embed=snipe_embed)\n\n @commands.command(\n name=\"editsnipe\", aliases=[\"esn\"], help=\"See a recently edited message\"\n )\n async def editsnipe(self, ctx, limit: int = 1):\n if limit > self.snipe_limit:\n await ctx.send(f\"Maximum snipe limit is {self.snipe_limit}\")\n return\n\n msgs = self.edited_msgs[str(ctx.channel.id)][::-1][:limit]\n editsnipe_embed = discord.Embed(\n title=\"Edit Snipe\", color=self.theme_color\n )\n\n for msg in msgs:\n editsnipe_embed.add_field(\n name=msg[0].author.display_name,\n value=f\"{msg[0].content} **-->** {msg[1].content}\",\n inline=False,\n )\n\n await ctx.send(embed=editsnipe_embed)\n", "repo_name": "MysteryCoder456/BeanBot", "sub_path": "bot/cogs/fun.py", "file_name": "fun.py", "file_ext": "py", "file_size_in_byte": 13383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "94", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 12, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 12, "usage_type": "name"}, {"api_name": "bot.data", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 32, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 31, "usage_type": "call"}, {"api_name": 
"discord.ext.commands.Cog", "line_number": 31, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 31, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 44, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 44, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 44, "usage_type": "name"}, {"api_name": "bot.data.Data.check_user_entry", "line_number": 63, "usage_type": "call"}, {"api_name": "bot.data.Data", "line_number": 63, "usage_type": "name"}, {"api_name": "bot.data.Data.c.execute", "line_number": 65, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 65, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 65, "usage_type": "name"}, {"api_name": "bot.data.Data.c.fetchone", "line_number": 69, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 69, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 69, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 78, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 79, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 82, "usage_type": "call"}, {"api_name": "bot.data.Data.c.execute", "line_number": 88, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 88, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 88, "usage_type": "name"}, {"api_name": "bot.data.Data.conn.commit", "line_number": 92, "usage_type": "call"}, {"api_name": "bot.data.Data.conn", "line_number": 92, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 92, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 94, "usage_type": "call"}, {"api_name": "discord.Color.green", "line_number": 100, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 100, "usage_type": "attribute"}, {"api_name": "discord.Color.red", "line_number": 103, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 103, "usage_type": "attribute"}, {"api_name": "discord.Color.gold", "line_number": 106, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 106, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 57, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 57, "usage_type": "name"}, {"api_name": "discord.User", "line_number": 112, "usage_type": "attribute"}, {"api_name": "bot.data.Data.check_user_entry", "line_number": 135, "usage_type": "call"}, {"api_name": "bot.data.Data", "line_number": 135, "usage_type": "name"}, {"api_name": "bot.data.Data.check_user_entry", "line_number": 136, "usage_type": "call"}, {"api_name": "bot.data.Data", "line_number": 136, "usage_type": "name"}, {"api_name": "bot.data.Data.c.execute", "line_number": 139, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 139, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 139, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 143, "usage_type": "call"}, {"api_name": "bot.data.Data.c.fetchone", "line_number": 143, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 143, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 143, "usage_type": "name"}, {"api_name": "bot.data.Data.c.execute", "line_number": 144, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 144, "usage_type": "attribute"}, {"api_name": 
"bot.data.Data", "line_number": 144, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 148, "usage_type": "call"}, {"api_name": "bot.data.Data.c.fetchone", "line_number": 148, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 148, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 148, "usage_type": "name"}, {"api_name": "bot.data.Data.c.execute", "line_number": 164, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 164, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 164, "usage_type": "name"}, {"api_name": "bot.data.Data.conn.commit", "line_number": 168, "usage_type": "call"}, {"api_name": "bot.data.Data.conn", "line_number": 168, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 168, "usage_type": "name"}, {"api_name": "bot.data.Data.c.execute", "line_number": 174, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 174, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 174, "usage_type": "name"}, {"api_name": "bot.data.Data.conn.commit", "line_number": 178, "usage_type": "call"}, {"api_name": "bot.data.Data.conn", "line_number": 178, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 178, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 204, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 219, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 237, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 269, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 284, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 302, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 111, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 111, "usage_type": "name"}, {"api_name": "bot.data.Data.check_user_entry", "line_number": 324, "usage_type": "call"}, {"api_name": "bot.data.Data", "line_number": 324, "usage_type": "name"}, {"api_name": "bot.data.Data.c.execute", "line_number": 326, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 326, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 326, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 330, "usage_type": "call"}, {"api_name": "bot.data.Data.c.fetchone", "line_number": 330, "usage_type": "call"}, {"api_name": "bot.data.Data.c", "line_number": 330, "usage_type": "attribute"}, {"api_name": "bot.data.Data", "line_number": 330, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 331, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 318, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 318, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 349, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 344, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 344, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 364, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 355, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 355, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 384, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 375, "usage_type": "call"}, 
{"api_name": "discord.ext.commands", "line_number": 375, "usage_type": "name"}]} +{"seq_id": "34420857959", "text": "import datetime as dt\nimport logging\nimport tempfile\nimport time\nimport traceback\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom jinja2 import Template\nfrom stratagemdataprocessing.data_api import get_capital_timeseries\nfrom stratagemdataprocessing.parsing.common.stickers import sticker_parts_from_sticker, parse_sticker\nfrom tabulate import tabulate\nfrom sgmtradingcore.backtesting.backtest import FrameworkHistoricalProviders\nimport sgmtradingcore.backtesting.persistence as persistence\nfrom sgmtradingcore.backtesting.automatic_backtest import TestResult, split_by_time_period, _get_size_matched, \\\n _get_size, get_strategy_code_from_instruction, _get_orders_debug_print\nfrom sgmtradingcore.backtesting.backtest_runner import run_backtest_main\nfrom sgmtradingcore.core.notifications import send_trading_system_email\nfrom sgmtradingcore.core.trading_types import OrderStatus\nfrom sgmtradingcore.strategies.config.configurations import TRADING_USER_MAP\nfrom sgmtradingcore.strategies.realtime import StrategyFactory\nfrom sgmtradingcore.strategies.strategy_base import StrategyStyle\nfrom sgmtradingcore.analytics.comparison.trades_stats import StrategyRunStatsHelper\n\n\ndef _send_email_retry(text, html_, subject, to_, attachments=None):\n try:\n send_trading_system_email(text, html_, subject, to_, files=attachments)\n except:\n time.sleep(4)\n send_trading_system_email(text, html_, subject, to_, files=attachments)\n\n\ndef _get_order_statuses(comparison_input, tmp_dir):\n values = [(comparison_input.name1, comparison_input.all_orders1),\n (comparison_input.name2, comparison_input.all_orders2)]\n file_paths = []\n message = \"\"\n for name, orders in values:\n message += \"Tot {} {} orders\\n\".format(name, len(orders))\n\n per_bookmaker_count = {}\n orders_by_bookmaker_and_status = dict()\n for o in orders:\n if 'provider' in o['execution_details']:\n bm = \"{}_{}\".format(o['execution_details']['provider'], o['execution_details']['bookmaker'])\n else:\n bm = \"{}\".format(o['execution_details']['bookmaker'])\n if bm not in orders_by_bookmaker_and_status:\n orders_by_bookmaker_and_status[bm] = {OrderStatus.REJECTED: 0,\n OrderStatus.FAILED: 0,\n OrderStatus.CANCELLED: 0,\n OrderStatus.SETTLED: 0}\n if OrderStatus(o['status']) not in orders_by_bookmaker_and_status[bm]:\n orders_by_bookmaker_and_status[bm][OrderStatus(o['status'])] = 0\n orders_by_bookmaker_and_status[bm][OrderStatus(o['status'])] += 1\n\n if bm not in per_bookmaker_count:\n per_bookmaker_count[bm] = 0\n per_bookmaker_count[bm] += 1\n\n headers = sorted([bm for bm, _ in orders_by_bookmaker_and_status.iteritems()])\n data = []\n for s in OrderStatus:\n line = list()\n line.append(str(s))\n for bm in headers:\n statuses = orders_by_bookmaker_and_status[bm]\n if s in statuses:\n line.append(statuses[s])\n else:\n line.append(0)\n data.append(line)\n message += tabulate(data, headers=headers)\n\n labels = sorted(orders_by_bookmaker_and_status.keys())\n values_rejected_perc = [float(orders_by_bookmaker_and_status[bm][OrderStatus.REJECTED]) / per_bookmaker_count[bm]\n for bm in labels]\n values_failed_perc = [float(orders_by_bookmaker_and_status[bm][OrderStatus.FAILED]) / per_bookmaker_count[bm]\n for bm in labels]\n values_settled_perc = [float(orders_by_bookmaker_and_status[bm][OrderStatus.SETTLED]) / 
per_bookmaker_count[bm]\n for bm in labels]\n values_cancelled_perc = [float(orders_by_bookmaker_and_status[bm][OrderStatus.CANCELLED]) /\n per_bookmaker_count[bm] for bm in labels]\n if not len(values_rejected_perc):\n values_rejected_perc = [0.0]\n if not len(values_failed_perc):\n values_failed_perc = [0.0]\n if not len(values_settled_perc):\n values_settled_perc = [0.0]\n if not len(values_cancelled_perc):\n values_cancelled_perc = [0.0]\n\n plt.bar(range(max(len(labels), 1)),\n values_rejected_perc,\n width=0.3,\n label='% Rejected',\n color='y')\n plt.bar(range(max(len(labels), 1)),\n values_failed_perc,\n width=0.3,\n bottom=values_rejected_perc,\n label='% Failed',\n color='r')\n plt.bar(range(max(len(labels), 1)),\n values_settled_perc,\n width=0.3,\n bottom=[x + y for x, y in zip(values_rejected_perc, values_failed_perc)],\n label='% Settled',\n color='b'\n )\n plt.bar(range(max(len(labels), 1)),\n values_cancelled_perc,\n width=0.3,\n bottom=[x + y + z for x, y, z in zip(values_rejected_perc, values_failed_perc, values_settled_perc)],\n label='% Cancelled',\n color='g'\n )\n\n # plt.axis(labels)\n\n plt.tight_layout(pad=3)\n # plt.gcf().subplots_adjust(bottom=0.15)\n plt.xticks(range(len(labels)), labels, rotation=-90)\n plt.legend()\n\n plt.title('Statuses {}'.format(name))\n file_paths.append('%s/%s' % (tmp_dir, \"statuses_{}.png\".format(name)))\n plt.savefig(file_paths[-1])\n plt.close()\n message += \"\\n\\n\"\n\n res = TestResult(name=\"Orders status count\",\n good_report_message=message,\n attachments=file_paths,\n )\n res.success = False\n return res\n\n\ndef _get_order_sources(comparison_input):\n message = \"{} orders source\\n\".format(comparison_input.name1)\n headers = set()\n for o in comparison_input.all_orders1:\n source = o['source'] if 'source' in o else 'unknown'\n headers.add(source)\n headers = sorted(headers)\n lines = []\n\n for strategy in comparison_input.strategies:\n orders_count_by_source = dict()\n for o in strategy.orders1:\n source = o['source'] if 'source' in o else 'unknown'\n if source not in orders_count_by_source:\n orders_count_by_source[source] = 0\n orders_count_by_source[source] += 1\n\n line = [strategy.get_short_name()]\n for source in headers:\n if source not in orders_count_by_source:\n line.append(\"0\")\n else:\n line.append(\"{}\".format(orders_count_by_source[source]))\n lines.append(line)\n\n message += tabulate(lines, headers=headers)\n res = TestResult(name=\"Orders sources\",\n good_report_message=message,\n )\n res.success = False\n return res\n\n\ndef _get_pnl_distrib_by_period(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n \"\"\"\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n message = \"\"\n attachments = []\n for strategy in comparison_input.strategies:\n periods_prod = strategy.daily_periods_1\n periods_back = strategy.daily_periods_2\n pnls_back = [p.get_pnl() for p in periods_back if p.has_orders()]\n pnls_prod = [p.get_pnl() for p in periods_prod if p.has_orders()]\n # bins = np.linspace(min(pnls_prod+pnls_back+[0]), max(pnls_prod+pnls_back+[0]), len(periods_prod))\n bins = min(max(len(periods_back), len(periods_prod)), 50)\n # hist = hist*1.0/sum(hist) # normalize\n pnl_range = (min(pnls_back + pnls_prod + [0]), max(pnls_back + pnls_prod + [0]))\n if len(pnls_prod):\n plt.hist(pnls_prod,\n rwidth=1.0, # size of bars relative to the bins\n bins=bins,\n label='Pnl distrib {} {}/{} days'.format(\n comparison_input.name1, len(pnls_prod), 
len(periods_prod)),\n color='b',\n range=pnl_range,\n alpha=0.5)\n if len(pnls_back):\n plt.hist(pnls_back,\n rwidth=1.0,\n bins=bins,\n label='Pnl distrib {} {}/{} days'.format(\n comparison_input.name2, len(pnls_back), len(periods_back)),\n color='y',\n range=pnl_range,\n alpha=0.5)\n # plt.axis(labels)\n\n if len(pnls_prod) or len(pnls_back):\n plt.tight_layout(pad=3)\n # plt.gcf().subplots_adjust(bottom=0.15)\n # plt.xticks(range(len(labels)), labels, rotation=-90)\n plt.legend()\n plt.title('Daily pnl {}'.format(strategy.get_short_name()))\n\n file_path = '%s/%s' % (tmp_dir, \"pnl_distrib_{}.png\".format(strategy.get_short_name()))\n plt.savefig(file_path)\n plt.close()\n attachments.append(file_path)\n\n res = TestResult(name=\"Strategy PNL {}/{}\".format(comparison_input.name1, comparison_input.name2),\n good_report_message=message,\n attachments=attachments,\n )\n res.success = True\n return res\n\n\ndef _daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days) + 1):\n yield start_date + dt.timedelta(n)\n\n\ndef _get_main_stats(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n \"\"\"\n Create main stats\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n headers = [\"Orders\", \"Settled\", \"Matched vol\",\n \"Avg daily ROI\", \"Tot PNL\", \"PNL Diff\",\n \"Avg daily Capital\"]\n lines = []\n message = \"\"\n attachments = []\n for strategy in comparison_input.strategies:\n daily_periods_prod = strategy.daily_periods_1\n daily_periods_back = strategy.daily_periods_2\n\n # Total ROI\n prod_daily_rois = []\n pnl_prod = 0.0\n for p in daily_periods_prod:\n date = p.start_datetime.date()\n capital_allocation = strategy.capitals_serie\n if isinstance(strategy.capitals_serie, dict):\n capital_allocation = strategy.capitals_serie[date] if date in strategy.capitals_serie else 0.\n pnl = p.get_pnl()\n pnl_prod += pnl\n if capital_allocation:\n roi = 100.0 * pnl / capital_allocation\n prod_daily_rois.append(roi)\n\n back_daily_rois = []\n pnl_back = 0.0\n for p in daily_periods_back:\n date = p.start_datetime.date()\n capital_allocation = strategy.capitals_serie\n if isinstance(strategy.capitals_serie, dict):\n capital_allocation = strategy.capitals_serie[date] if date in strategy.capitals_serie else 0.\n pnl = p.get_pnl()\n pnl_back += pnl\n if capital_allocation > 0.0:\n roi = 100.0 * pnl / capital_allocation\n back_daily_rois.append(roi)\n\n capitals = [c for c in strategy.capitals_serie.values() if c > 0.0] if isinstance(strategy.capitals_serie, dict) \\\n else [strategy.capitals_serie]\n avg_capital = int(sum(capitals) / len(capitals)) if len(capitals) else 0\n\n line = [strategy.get_short_name(),\n \"{:5.0f}/{:>5.0f}\".format(len(strategy.orders1), len(strategy.orders2)),\n \"{:5.0f}/{:>5.0f}\".format(len([o for o in strategy.orders1 if o['status'] == OrderStatus.SETTLED]),\n len([o for o in strategy.orders2 if o['status'] == OrderStatus.SETTLED])),\n \"{:8.0f}/{:>8.0f}\".format( # Matched vol\n sum([o['size_matched'] for o in strategy.orders1 if o['status'] == OrderStatus.SETTLED]),\n sum([o['size_matched'] for o in strategy.orders2 if o['status'] == OrderStatus.SETTLED])),\n \"{:5.2f}/{:>5.2f}%\".format(sum(prod_daily_rois) / len(prod_daily_rois) if len(prod_daily_rois) else 0.0,\n sum(back_daily_rois) / len(back_daily_rois) if len(back_daily_rois) else 0.0\n ),\n \"{:6.0f}/{:>6.0f}\".format(pnl_prod, pnl_back),\n \"{:5.1f}%\".format((pnl_back - pnl_prod) * 100 / abs(pnl_prod) if pnl_prod != 0. 
else 9999),\n \"{}\".format(avg_capital)\n ]\n lines.append(line)\n\n message += tabulate(lines, headers=headers, stralign='right')\n\n res = TestResult(name=\"ROI {}/{}\".format(comparison_input.name1, comparison_input.name2),\n good_report_message=message,\n attachments=attachments,\n )\n res.success = True\n return res\n\n\ndef _get_weekly_roi(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n \"\"\"\n Create weekly ROI chart and ROI ZScore chart\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n headers = [\"Orders\", \"Settled\", \"Avg daily ROI\", \"Tot PNL\", \"PNL Diff\", \"Avg daily Capital\"]\n lines = []\n message = \"\"\n attachments = []\n for strategy in comparison_input.strategies:\n # Weekly ROI\n weekly_periods_prod = split_by_time_period(strategy.orders1, 7,\n start_datetime.date(),\n end_datetime.date(),\n use_cache=use_cache)\n weekly_periods_back = split_by_time_period(strategy.orders2, 7,\n start_datetime.date(),\n end_datetime.date(),\n use_cache=use_cache)\n\n weekly_roi_prod = []\n for p in weekly_periods_prod:\n daily_rois = []\n pnls = p.get_daily_pnl()\n for date in _daterange(p.start_datetime.date(), p.end_datetime.date()):\n capital_allocation = strategy.capitals_serie\n if isinstance(strategy.capitals_serie, dict):\n capital_allocation = strategy.capitals_serie[date] if date in strategy.capitals_serie else 0.\n pnl = pnls[date]\n roi = 100.0 * pnl / capital_allocation if capital_allocation else 0.0\n daily_rois.append(roi)\n\n weekly_roi_prod.append(sum(daily_rois) / len(daily_rois))\n\n weekly_roi_back = []\n for p in weekly_periods_back:\n daily_rois = []\n pnls = p.get_daily_pnl()\n for date in _daterange(p.start_datetime.date(), p.end_datetime.date()):\n capital_allocation = strategy.capitals_serie\n if isinstance(strategy.capitals_serie, dict):\n capital_allocation = strategy.capitals_serie[date] if date in strategy.capitals_serie else 0.\n pnl = pnls[date]\n roi = 100.0 * pnl / capital_allocation if capital_allocation else 0.0\n daily_rois.append(roi)\n\n weekly_roi_back.append(sum(daily_rois) / len(daily_rois))\n\n labels = [o.start_datetime.strftime('%Y-%m-%d') for o in weekly_periods_back]\n if len(weekly_roi_prod) or len(weekly_roi_back):\n plt.plot(weekly_roi_prod,\n 'b-',\n label='{} %ROI'.format(comparison_input.name1),\n alpha=0.5)\n plt.plot(weekly_roi_back,\n 'y-',\n label='{} %ROI'.format(comparison_input.name2),\n alpha=0.5)\n plt.xticks(range(len(labels)), labels, rotation=17)\n plt.legend()\n plt.title('Weekly ROI {}'.format(strategy.get_short_name()))\n file_path = '%s/%s' % (tmp_dir, \"weekly_ROI_{}.png\".format(strategy.get_short_name()))\n plt.savefig(file_path)\n attachments.append(file_path)\n plt.close()\n\n # Weekly ROI ZScore\n roi_df = pd.DataFrame({\n '{}_ROI'.format(comparison_input.name1): weekly_roi_prod,\n '{}_ROI'.format(comparison_input.name2): weekly_roi_back,\n })\n\n zscores = \\\n (roi_df - roi_df.rolling(window=len(roi_df), min_periods=1).mean()) / \\\n roi_df.rolling(window=len(roi_df), min_periods=1).std()\n zscores.fillna(0, inplace=True) # replaces Nan with zeroes\n\n labels = [o.start_datetime.strftime('%Y-%m-%d') for o in weekly_periods_back]\n if len(weekly_roi_prod) or len(weekly_roi_back):\n plt.plot(zscores['{}_ROI'.format(comparison_input.name1)],\n 'b-',\n label='{} ZScore'.format(comparison_input.name1),\n alpha=0.5)\n plt.plot(zscores['{}_ROI'.format(comparison_input.name2)],\n 'y-',\n label='{} ZScore'.format(comparison_input.name2),\n alpha=0.5)\n 
plt.xticks(range(len(labels)), labels, rotation=17)\n plt.legend()\n plt.title('Weekly ROI zscore {}'.format(strategy.get_short_name()))\n file_path = '%s/%s' % (tmp_dir, \"weekly_ROI_zscore_{}.png\".format(strategy.get_short_name()))\n plt.savefig(file_path)\n attachments.append(file_path)\n plt.close()\n\n message += tabulate(lines, headers=headers, stralign='right')\n\n res = TestResult(name=\"ROI {}/{}\".format(comparison_input.name1, comparison_input.name2),\n good_report_message=message,\n attachments=attachments,\n )\n res.success = True\n return res\n\n\ndef _get_extra_stats(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n \"\"\"\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n headers = [\"Matched vol\", \"Matched win\", \"Matched lose\",\n \"Tot PNL\", \"Win\", \"Lose\", \"Win over Matched win\", \"Lose over Matched Lose\"]\n lines = []\n message = \"\"\n attachments = []\n for strategy in comparison_input.strategies:\n prod_matched_win = sum(\n [o['size_matched'] for o in strategy.orders1 if o['status'] == OrderStatus.SETTLED and _get_order_pnl(o) > 0])\n back_matched_win = sum(\n [o['size_matched'] for o in strategy.orders2 if o['status'] == OrderStatus.SETTLED and _get_order_pnl(o) > 0])\n prod_matched_lose = sum(\n [o['size_matched'] for o in strategy.orders1 if o['status'] == OrderStatus.SETTLED and _get_order_pnl(o) < 0])\n back_matched_lose = sum(\n [o['size_matched'] for o in strategy.orders2 if o['status'] == OrderStatus.SETTLED and _get_order_pnl(o) < 0])\n\n pnl_prod = sum([_get_order_pnl(o) for o in strategy.orders1])\n pnl_back = sum([_get_order_pnl(o) for o in strategy.orders2])\n win_prod = sum([_get_order_pnl(o) for o in strategy.orders1 if _get_order_pnl(o) > 0])\n win_back = sum([_get_order_pnl(o) for o in strategy.orders2 if _get_order_pnl(o) > 0])\n lose_prod = sum([_get_order_pnl(o) for o in strategy.orders1 if _get_order_pnl(o) < 0])\n lose_back = sum([_get_order_pnl(o) for o in strategy.orders2 if _get_order_pnl(o) < 0])\n\n line = [strategy.get_short_name(),\n \"{:8.0f}/{:>8.0f}\".format( # Matched vol\n sum([o['size_matched'] for o in strategy.orders1 if o['status'] == OrderStatus.SETTLED]),\n sum([o['size_matched'] for o in strategy.orders2 if o['status'] == OrderStatus.SETTLED])),\n \"{:8.0f}/{:>8.0f}\".format(prod_matched_win, back_matched_win), # Matched win\n \"{:8.0f}/{:>8.0f}\".format(prod_matched_lose, back_matched_lose), # Matched lose\n \"{:6.0f}/{:>6.0f}\".format(pnl_prod, pnl_back), # Tot PNL\n \"{:6.0f}/{:>6.0f}\".format(win_prod, win_back), # Win\n \"{:6.0f}/{:>6.0f}\".format(lose_prod, lose_back), # Lose\n \"{:2.3f}/{:>2.3f}\".format(0.0 if prod_matched_win == 0 else float(win_prod / prod_matched_win),\n 0.0 if back_matched_win == 0 else float(win_back / back_matched_win)),\n # Win over Matched Win\n \"{:2.3f}/{:>2.3f}\".format(0.0 if prod_matched_lose == 0 else float(lose_prod / prod_matched_lose),\n 0.0 if back_matched_lose == 0 else float(lose_back / back_matched_lose)),\n # Lose over Matched Lose\n ]\n lines.append(line)\n\n message += tabulate(lines, headers=headers, stralign='right')\n\n res = TestResult(name=\"Vol stats {}/{}\".format(comparison_input.name1, comparison_input.name2),\n good_report_message=message,\n attachments=attachments,\n )\n res.success = True\n return res\n\n\ndef _get_daily_roi(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n \"\"\"\n Create daily cumulative ROI chart\n\n :param comparison_input: type [ComparisonInput]\n :return: 
TestResult\n \"\"\"\n\n message = \"\"\n attachments = []\n for strategy in comparison_input.strategies:\n daily_periods_prod = strategy.daily_periods_1\n daily_periods_back = strategy.daily_periods_2\n\n daily_roi_prod = []\n for p in daily_periods_prod:\n daily_rois = []\n pnls = p.get_daily_pnl()\n for date in _daterange(p.start_datetime.date(), p.end_datetime.date()):\n capital_allocation = strategy.capitals_serie\n if isinstance(strategy.capitals_serie, dict):\n capital_allocation = strategy.capitals_serie[date] if date in strategy.capitals_serie else 0.0\n pnl = pnls[date]\n roi = 100.0 * pnl / capital_allocation if capital_allocation else 0.0\n daily_rois.append(roi)\n\n daily_roi_prod.append(sum(daily_rois) / len(daily_rois))\n\n daily_roi_back = []\n for p in daily_periods_back:\n daily_rois = []\n pnls = p.get_daily_pnl()\n for date in _daterange(p.start_datetime.date(), p.end_datetime.date()):\n capital_allocation = strategy.capitals_serie\n if isinstance(strategy.capitals_serie, dict):\n capital_allocation = strategy.capitals_serie[date] if date in strategy.capitals_serie else 0.0\n pnl = pnls[date]\n roi = 100.0 * pnl / capital_allocation if capital_allocation else 0.0\n daily_rois.append(roi)\n\n daily_roi_back.append(sum(daily_rois) / len(daily_rois))\n\n cumulative_back_roi = np.cumsum(daily_roi_back)\n cumulative_prod_roi = np.cumsum(daily_roi_prod)\n\n labels = [''] * len(daily_periods_prod)\n for i in range(0, len(daily_periods_prod), 7):\n labels[i] = daily_periods_prod[i].start_datetime.strftime('%Y-%m-%d')\n if len(cumulative_prod_roi) or len(cumulative_back_roi):\n plt.plot(cumulative_prod_roi,\n 'b-',\n label='{} ROI'.format(comparison_input.name1),\n alpha=0.5)\n plt.plot(cumulative_back_roi,\n 'y-',\n label='{} ROI'.format(comparison_input.name2),\n alpha=0.5)\n plt.xticks(range(len(labels)), labels, rotation=17)\n plt.legend()\n plt.title('Daily cumulative ROI {}'.format(strategy.get_short_name()))\n file_path = '%s/%s' % (tmp_dir, \"daily_cumulative_ROI_{}.png\".format(strategy.get_short_name()))\n plt.savefig(file_path)\n attachments.append(file_path)\n plt.close()\n\n res = TestResult(name=\"Daily ROI {}/{}\".format(comparison_input.name1, comparison_input.name2),\n good_report_message=message,\n attachments=attachments,\n )\n res.success = True\n return res\n\n\ndef _print_capital(comparison_input, start_datetime, end_datetime, tmp_dir):\n \"\"\"\n Create daily cumulative ROI chart\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n message = \"See line graph\"\n attachments = []\n for strategy in comparison_input.strategies:\n\n dates = sorted([d for d in _daterange(start_datetime.date(), end_datetime.date())])\n labels = [''] * len(dates)\n for i in range(0, len(dates), 7):\n labels[i] = dates[i].strftime('%Y-%m-%d')\n\n capital_by_day = [strategy.capitals_serie[date] for date in dates] if isinstance(strategy.capitals_serie, dict) \\\n else [strategy.capitals_serie for date in dates]\n\n if len(capital_by_day):\n plt.plot(capital_by_day,\n 'r-',\n label='Allocated capital',\n alpha=0.5)\n plt.xticks(range(len(labels)), labels, rotation=17)\n plt.legend()\n plt.title('Daily capital {}'.format(strategy.get_short_name()))\n file_path = '%s/%s' % (tmp_dir, \"daily_capital_{}.png\".format(strategy.get_short_name()))\n plt.savefig(file_path)\n attachments.append(file_path)\n plt.close()\n\n res = TestResult(name=\"Daily capital\",\n good_report_message=message,\n attachments=attachments,\n )\n res.success = True\n return 
res\n\n\ndef _print_capital_and_missed_volume(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n \"\"\"\n Create daily cumulative ROI chart\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n message = \"See line graph\"\n attachments = []\n for strategy in comparison_input.strategies:\n\n dates = sorted([d for d in _daterange(start_datetime.date(), end_datetime.date())])\n labels = [''] * len(dates)\n for i in range(0, len(dates), 7):\n labels[i] = dates[i].strftime('%Y-%m-%d')\n\n capital_by_day = [strategy.capitals_serie[date] for date in dates]\n\n missed_vol = {}\n missed_pnl = {}\n for k in strategy.capitals_serie.keys():\n missed_vol[k] = 0.\n missed_pnl[k] = 0.\n\n daily_periods_prod = split_by_time_period(strategy.instructions1, 1,\n start_datetime.date(),\n end_datetime.date(),\n use_instructions=True,\n use_cache=use_cache)\n\n missed_vols = []\n missed_pnl = []\n for period in daily_periods_prod:\n vol, pnl = period.get_missed_vol()\n missed_vols.append(vol)\n missed_pnl.append(pnl)\n\n if len(capital_by_day):\n plt.plot(capital_by_day,\n 'r-',\n label='Allocated capital')\n plt.plot(missed_vols,\n 'y-',\n label='missed vols prod')\n plt.plot(missed_pnl,\n 'g-',\n label='missed pnl prod')\n\n plt.xticks(range(len(labels)), labels, rotation=17)\n plt.legend()\n plt.title('Daily capital and missed {}'.format(strategy.get_short_name()))\n file_path = '%s/%s' % (tmp_dir, \"daily_capital_and_missed{}.png\".format(strategy.get_short_name()))\n plt.savefig(file_path)\n attachments.append(file_path)\n plt.close()\n\n res = TestResult(name=\"Daily capital and missed\",\n good_report_message=message,\n attachments=attachments,\n )\n res.success = True\n return res\n\n\ndef _get_different_events(comparison_input):\n \"\"\"\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n message = \"\"\n headers = [\"{}_only\".format(comparison_input.name1), \"{}_only\".format(comparison_input.name2),\n \"common\", \"total\", \"diff%\"]\n data = []\n for strategy in comparison_input.strategies:\n prod_events = set([parse_sticker(o['sticker'])[1][1] for o in strategy.orders1])\n backtest_events = set([parse_sticker(o['sticker'])[1][1] for o in strategy.orders2])\n common_events = prod_events & backtest_events\n union_events = prod_events | backtest_events\n n_prod_only = len(prod_events) - len(common_events)\n n_back_only = len(backtest_events) - len(common_events)\n\n line = list()\n line.append(strategy.get_short_name())\n line.extend([n_prod_only,\n n_back_only,\n len(common_events),\n len(union_events),\n \"{:.0f}%\".format(\n ((n_prod_only + n_back_only) * 100.0 / len(union_events)) if len(union_events) else 0.0)\n ])\n data.append(line)\n message += tabulate(data, headers=headers, stralign='right')\n\n res = TestResult(name=\"Different events\".format(),\n good_report_message=message,\n )\n res.success = True\n return res\n\n\nclass PriceSlippage(object):\n def __init__(self):\n self.back_vol = 0.0\n self.back_vol_matched = 0.0\n self.back_price_times_vol = 0.0\n self.back_price_matched_times_vol = 0.0\n self.lay_vol = 0.0\n self.lay_vol_matched = 0.0\n self.lay_price_times_vol = 0.0\n self.lay_price_matched_times_vol = 0.0\n\n self.back_avg_price = None\n self.back_avg_matched_price = None\n self.lay_avg_price = None\n self.lay_avg_matched_price = None\n\n self.back_perc_match_price_vs_asked = None\n self.lay_perc_match_price_vs_asked = None\n\n def make_avg(self):\n self.back_avg_price = 
self.back_price_times_vol / self.back_vol if self.back_vol > 0.0 else 0.0\n self.back_avg_matched_price = (self.back_price_matched_times_vol / self.back_vol_matched) \\\n if self.back_vol_matched > 0.0 else 0.0\n self.back_perc_match_price_vs_asked = (100.0 * self.back_avg_matched_price / self.back_avg_price) - 100 \\\n if self.back_avg_price else 0.0\n\n self.lay_avg_price = self.lay_price_times_vol / self.lay_vol if self.lay_vol > 0.0 else 0.0\n self.lay_avg_matched_price = (self.lay_price_matched_times_vol / self.lay_vol_matched) \\\n if self.lay_vol_matched > 0.0 else 0.0\n self.lay_perc_match_price_vs_asked = (100.0 * self.lay_avg_matched_price / self.lay_avg_price) - 100 \\\n if self.lay_avg_price else 0.0\n\n\ndef _get_prices_avg_by_vol(orders):\n prices = PriceSlippage()\n for o in orders:\n if o['status'] not in [OrderStatus.SETTLED]:\n continue\n vol = _get_size(o)\n matched_vol = _get_size_matched(o)\n price = o['price']\n matched_price = o['average_price_matched']\n if o['bet_side'] == 'back':\n prices.back_vol += vol\n prices.back_vol_matched += matched_vol\n prices.back_price_times_vol += price * vol\n prices.back_price_matched_times_vol += matched_price * matched_vol\n else:\n prices.lay_vol += vol\n prices.lay_vol_matched += matched_vol\n prices.lay_price_times_vol += price * vol\n prices.lay_price_matched_times_vol += matched_price * matched_vol\n\n prices.make_avg()\n return prices\n\n\ndef _price_slippage(comparison_input):\n \"\"\"\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n message = \"\"\n headers_back = [\"{} price\".format(comparison_input.name1),\n \"{} matched\".format(comparison_input.name1),\n \"{} price\".format(comparison_input.name2),\n \"{} matched\".format(comparison_input.name2),\n ]\n headers_lay = [\"{} price\".format(comparison_input.name1),\n \"{} matched\".format(comparison_input.name1),\n \"{} price\".format(comparison_input.name2),\n \"{} matched\".format(comparison_input.name2),\n ]\n data = []\n for strategy in comparison_input.strategies:\n prod_prices = _get_prices_avg_by_vol(strategy.orders1)\n back_prices = _get_prices_avg_by_vol(strategy.orders2)\n line = list()\n line.append(strategy.get_short_name())\n line.extend([\"{:.2f}\".format(prod_prices.back_avg_price),\n \"{:.2f}\".format(prod_prices.back_avg_matched_price),\n \"{:.2f}\".format(back_prices.back_avg_price),\n \"{:.2f}\".format(back_prices.back_avg_matched_price),\n ])\n data.append(line)\n message += \"Back prices:\\n\"\n message += tabulate(data, headers=headers_back)\n\n data = []\n for strategy in comparison_input.strategies:\n prod_prices = _get_prices_avg_by_vol(strategy.orders1)\n back_prices = _get_prices_avg_by_vol(strategy.orders2)\n line = list()\n line.append(strategy.get_short_name())\n line.extend([\"{:.2f}\".format(prod_prices.lay_avg_price),\n \"{:.2f}\".format(prod_prices.lay_avg_matched_price),\n \"{:.2f}\".format(back_prices.lay_avg_price),\n \"{:.2f}\".format(back_prices.lay_avg_matched_price),\n ])\n data.append(line)\n message += \"Lay prices:\\n\"\n message += tabulate(data, headers=headers_lay)\n\n res = TestResult(name=\"Price slippage\".format(),\n good_report_message=message\n )\n res.success = True\n return res\n\n\ndef _get_orders_by_bookmaker(orders):\n orders_by_bookmaker = dict()\n for o in orders:\n if 'provider' in o['execution_details']:\n bm = \"{}_{}\".format(o['execution_details']['provider'], o['execution_details']['bookmaker'])\n else:\n bm = \"{}\".format(o['execution_details']['bookmaker'])\n if bm 
not in orders_by_bookmaker:\n orders_by_bookmaker[bm] = []\n orders_by_bookmaker[bm].append(o)\n return orders_by_bookmaker\n\n\ndef _price_slippage_by_venue(comparison_input):\n \"\"\"\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n message = \"\"\n\n all_bm = []\n for strategy in comparison_input.strategies:\n all_bm.extend(_get_orders_by_bookmaker(strategy.orders1).keys())\n all_bm = list(set(all_bm))\n\n # Prod Back\n data = []\n for strategy in comparison_input.strategies:\n line = list()\n line.append(strategy.get_short_name())\n prod_orders_by_bm = _get_orders_by_bookmaker(strategy.orders1)\n for bm in all_bm:\n if bm in prod_orders_by_bm:\n orders = prod_orders_by_bm[bm]\n prod_prices = _get_prices_avg_by_vol(orders)\n line.extend([\"{:>5.2f}%\".format(prod_prices.back_perc_match_price_vs_asked)])\n else:\n line.extend([\"-\"])\n data.append(line)\n message += \"\\n{} B prices matched vs asked:\\n\".format(comparison_input.name1)\n message += tabulate(data, headers=all_bm, stralign='right')\n\n # Prod Lay\n data = []\n for strategy in comparison_input.strategies:\n line = list()\n line.append(strategy.get_short_name())\n prod_orders_by_bm = _get_orders_by_bookmaker(strategy.orders1)\n for bm in all_bm:\n if bm in prod_orders_by_bm:\n orders = prod_orders_by_bm[bm]\n prod_prices = _get_prices_avg_by_vol(orders)\n line.extend([\"{:<5.2f}%\".format(prod_prices.lay_perc_match_price_vs_asked)])\n else:\n line.extend([\"-\"])\n data.append(line)\n message += \"\\n\\n{} L prices matched vs asked:\\n\".format(comparison_input.name1)\n message += tabulate(data, headers=all_bm)\n\n # Backtest Back\n data = []\n for strategy in comparison_input.strategies:\n line = list()\n line.append(strategy.get_short_name())\n back_orders_by_bm = _get_orders_by_bookmaker(strategy.orders2)\n for bm in all_bm:\n if bm in back_orders_by_bm:\n orders = back_orders_by_bm[bm]\n back_prices = _get_prices_avg_by_vol(orders)\n line.extend([\"{:>5.2f}%\".format(back_prices.back_perc_match_price_vs_asked)])\n else:\n line.extend([\"-\"])\n data.append(line)\n message += \"\\n\\n{} B prices matched vs asked:\\n\".format(comparison_input.name2)\n message += tabulate(data, headers=all_bm, stralign='right')\n\n # Backtest Lay\n data = []\n for strategy in comparison_input.strategies:\n line = list()\n line.append(strategy.get_short_name())\n back_orders_by_bm = _get_orders_by_bookmaker(strategy.orders2)\n for bm in all_bm:\n if bm in back_orders_by_bm:\n orders = back_orders_by_bm[bm]\n back_prices = _get_prices_avg_by_vol(orders)\n line.extend([\"{:<5.2f}%\".format(back_prices.lay_perc_match_price_vs_asked)])\n else:\n line.extend([\"-\"])\n data.append(line)\n message += \"\\n\\n{} L prices matched vs asked:\\n\".format(comparison_input.name2)\n message += tabulate(data, headers=all_bm)\n\n res = TestResult(name=\"Price slippage by venue\".format(),\n good_report_message=message\n )\n res.success = True\n return res\n\n\ndef _get_order_pnl(order):\n return StrategyRunStatsHelper.get_order_pnl(order)\n\n\ndef _get_pnl_per_event(comparison_input):\n \"\"\"\n\n :param comparison_input: type [ComparisonInput]\n :return: TestResult\n \"\"\"\n\n message = \"\"\n\n all_bm1 = []\n for strategy in comparison_input.strategies:\n all_bm1.extend(_get_orders_by_bookmaker(strategy.orders1).keys())\n all_bm1 = list(set(all_bm1))\n\n # Prod Back\n data = []\n for strategy in comparison_input.strategies:\n\n settled1 = [o for o in strategy.orders1 if o['status'] == OrderStatus.SETTLED]\n settled2 
= [o for o in strategy.orders2 if o['status'] == OrderStatus.SETTLED]\n        events_pnl1 = defaultdict(int)\n        events_pnl2 = defaultdict(int)\n        events_matched1 = defaultdict(int)\n        events_matched2 = defaultdict(int)\n        for o in settled1:\n            _, (_, event_id), _, params, _ = parse_sticker(o['sticker'])\n            events_pnl1[event_id] += _get_order_pnl(o)\n            events_matched1[event_id] += o['size_matched']\n        for o in settled2:\n            _, (_, event_id), _, params, _ = parse_sticker(o['sticker'])\n            events_pnl2[event_id] += _get_order_pnl(o)\n            events_matched2[event_id] += o['size_matched']\n        all_events = set()\n        all_events.update(set(events_pnl1.keys()))\n        all_events.update(set(events_pnl2.keys()))\n\n        data = []  # reset per strategy so each table only lists this strategy's events\n        for event_id in all_events:\n            line = list()\n            line.append(\"{}\".format(event_id))\n            line.append(\"{:>5.2f}\".format(events_pnl1[event_id]))\n            line.append(\"{:>5.2f}\".format(events_pnl2[event_id]))\n            line.append(\"{:>5.2f}\".format(abs(events_pnl1[event_id] - events_pnl2[event_id])))\n            line.append(\"{:>5.2f}\".format(events_matched1[event_id]))\n            line.append(\"{:>5.2f}\".format(events_matched2[event_id]))\n            line.append(\"{:>5.2f}\".format(abs(events_matched1[event_id] - events_matched2[event_id])))\n            data.append(line)\n\n        message += \"\\n\\n PNL per event: {}\\n\".format(strategy.get_short_name())\n        message += tabulate(data, headers=[\"event\", comparison_input.name1, comparison_input.name2, \"pnl diff\",\n                                           \"matched1\", \"matched2\", \"matched diff\"], stralign='right')\n\n    res = TestResult(name=\"PNL per event\",\n                     good_report_message=message\n                     )\n    res.success = True\n    return res\n\n\ndef _get_pnl_zscore(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n    \"\"\"\n    Plot weekly pnl z-scores for both runs\n\n    :param comparison_input: type [ComparisonInput]\n    :return: TestResult\n    \"\"\"\n\n    message = \"See the line graph\"\n    attachments = []\n    for strategy in comparison_input.strategies:\n        periods_prod = split_by_time_period(strategy.orders1, 7,\n                                            start_datetime.date(),\n                                            end_datetime.date(),\n                                            use_cache=use_cache)\n\n        periods_back = split_by_time_period(strategy.orders2, 7,\n                                            start_datetime.date(),\n                                            end_datetime.date(),\n                                            use_cache=use_cache)\n\n        back_pnls = pd.Series([p.get_pnl() for p in periods_back],\n                              index=[p.start_datetime.date() for p in periods_back])\n        prod_pnls = pd.Series([p.get_pnl() for p in periods_prod],\n                              index=[p.start_datetime.date() for p in periods_prod])\n        pnl_df = pd.DataFrame({\n            '{}_pnl'.format(comparison_input.name1): prod_pnls,\n            '{}_pnl'.format(comparison_input.name2): back_pnls,\n        })\n\n        zscores = \\\n            (pnl_df - pnl_df.rolling(window=len(pnl_df), min_periods=1).mean()) / \\\n            pnl_df.rolling(window=len(pnl_df), min_periods=1).std()\n        zscores.fillna(0, inplace=True)  # replace NaN with zeroes\n\n        labels = [o.start_datetime.strftime('%Y-%m-%d') for o in periods_back]\n        plt.plot(zscores['{}_pnl'.format(comparison_input.name1)],\n                 'b-',\n                 label='{} ZScore'.format(comparison_input.name1),\n                 alpha=0.5)\n        plt.plot(zscores['{}_pnl'.format(comparison_input.name2)],\n                 'y-',\n                 label='{} ZScore'.format(comparison_input.name2),\n                 alpha=0.5)\n        plt.xticks(range(len(labels)), labels, rotation=17)\n        plt.legend()\n        plt.title('Weekly pnl zscore {}'.format(strategy.get_short_name()))\n        file_path = '%s/%s' % (tmp_dir, \"weekly_zscore_{}.png\".format(strategy.get_short_name()))\n        plt.savefig(file_path)\n        attachments.append(file_path)\n        plt.close()\n\n    res = TestResult(name=\"ZScore by week\",\n                     good_report_message=message,\n                     attachments=attachments)\n    res.success 
= True\n    return res\n\n\ndef _get_weekly_cumulative_pnl(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n    \"\"\"\n    Plot weekly cumulative pnl for both runs\n\n    :param comparison_input: type [ComparisonInput]\n    :return: TestResult\n    \"\"\"\n\n    message = \"See the line graph\"\n    attachments = []\n    for strategy in comparison_input.strategies:\n        periods_prod = split_by_time_period(strategy.orders1, 7,\n                                            start_datetime.date(),\n                                            end_datetime.date(),\n                                            use_cache=use_cache)\n\n        periods_back = split_by_time_period(strategy.orders2, 7,\n                                            start_datetime.date(),\n                                            end_datetime.date(),\n                                            use_cache=use_cache)\n\n        back_pnls = [p.get_pnl() for p in periods_back]\n        prod_pnls = [p.get_pnl() for p in periods_prod]\n        cumulative_back_pnls = np.cumsum(back_pnls)\n        cumulative_prod_pnls = np.cumsum(prod_pnls)\n\n        labels = [o.start_datetime.strftime('%Y-%m-%d') for o in periods_back]\n        plt.plot(cumulative_prod_pnls,\n                 'b-',\n                 label='{} pnl'.format(comparison_input.name1),\n                 alpha=0.5)\n        plt.plot(cumulative_back_pnls,\n                 'y-',\n                 label='{} pnl'.format(comparison_input.name2),\n                 alpha=0.5)\n        plt.xticks(range(len(labels)), labels, rotation=17)\n        plt.legend()\n        plt.title('Weekly cumulative PNL {}'.format(strategy.get_short_name()))\n        file_path = '%s/%s' % (tmp_dir, \"weekly_cumulative_pnl_{}.png\".format(strategy.get_short_name()))\n        plt.savefig(file_path)\n        attachments.append(file_path)\n        plt.close()\n\n    res = TestResult(name=\"Weekly cumulative pnl\",\n                     good_report_message=message,\n                     attachments=attachments)\n    res.success = True\n    return res\n\n\ndef _get_daily_cumulative_pnl(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n    \"\"\"\n    Plot daily cumulative pnl for both runs\n\n    :param comparison_input: type [ComparisonInput]\n    :return: TestResult\n    \"\"\"\n\n    message = \"See the line graph\"\n    attachments = []\n    for strategy in comparison_input.strategies:\n        periods_prod = strategy.daily_periods_1\n        periods_back = strategy.daily_periods_2\n\n        back_pnls = [p.get_pnl() for p in periods_back]\n        prod_pnls = [p.get_pnl() for p in periods_prod]\n        cumulative_back_pnls = np.cumsum(back_pnls)\n        cumulative_prod_pnls = np.cumsum(prod_pnls)\n\n        labels = [''] * len(periods_back)\n        for i in range(0, len(periods_back), 7):\n            labels[i] = periods_back[i].start_datetime.strftime('%Y-%m-%d')\n        plt.plot(cumulative_prod_pnls,\n                 'b-',\n                 label='{} pnl'.format(comparison_input.name1),\n                 alpha=0.5)\n        plt.plot(cumulative_back_pnls,\n                 'y-',\n                 label='{} pnl'.format(comparison_input.name2),\n                 alpha=0.5)\n        plt.xticks(range(len(labels)), labels, rotation=17)\n        plt.legend()\n        plt.title('Daily cumulative PNL {}'.format(strategy.get_short_name()))\n        file_path = '%s/%s' % (tmp_dir, \"daily_cumulative_pnl_{}.png\".format(strategy.get_short_name()))\n        plt.savefig(file_path)\n        attachments.append(file_path)\n        plt.close()\n\n    res = TestResult(name=\"Daily cumulative pnl\",\n                     good_report_message=message,\n                     attachments=attachments)\n    res.success = True\n    return res\n\n\ndef _get_daily_pnl(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n    \"\"\"\n    Plot daily pnl for both runs\n\n    :param comparison_input: type [ComparisonInput]\n    :return: TestResult\n    \"\"\"\n\n    message = \"See the line graph\"\n    attachments = []\n    for strategy in comparison_input.strategies:\n        periods_prod = strategy.daily_periods_1\n        periods_back = strategy.daily_periods_2\n\n        back_pnls = [p.get_pnl() for p in periods_back]\n        prod_pnls = [p.get_pnl() for p in periods_prod]\n\n        labels = [''] * len(periods_back)\n        for i in range(0, len(periods_back), 7):\n            labels[i] = periods_back[i].start_datetime.strftime('%Y-%m-%d')\n        plt.plot(prod_pnls,\n                 'b-',\n                 label='{} 
pnl'.format(comparison_input.name1),\n                 alpha=0.5)\n        plt.plot(back_pnls,\n                 'y-',\n                 label='{} pnl'.format(comparison_input.name2),\n                 alpha=0.5)\n        plt.xticks(range(len(labels)), labels, rotation=17)\n        plt.legend()\n        plt.title('Daily PNL {}'.format(strategy.get_short_name()))\n        file_path = '%s/%s' % (tmp_dir, \"daily_pnl_{}.png\".format(strategy.get_short_name()))\n        plt.savefig(file_path)\n        attachments.append(file_path)\n        plt.close()\n\n    res = TestResult(name=\"Daily pnl\",\n                     good_report_message=message,\n                     attachments=attachments)\n    res.success = True\n    return res\n\n\ndef _get_daily_cumulative_vol(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache):\n    \"\"\"\n    Plot daily cumulative matched volume for both runs\n\n    :param comparison_input: type [ComparisonInput]\n    :return: TestResult\n    \"\"\"\n\n    message = \"See the line graph\"\n    attachments = []\n    for strategy in comparison_input.strategies:\n        periods_prod = strategy.daily_periods_1\n        periods_back = strategy.daily_periods_2\n\n        back_vols = [p.get_matched_vol() for p in periods_back]\n        prod_vols = [p.get_matched_vol() for p in periods_prod]\n        cumulative_back_vols = np.cumsum(back_vols)\n        cumulative_prod_vols = np.cumsum(prod_vols)\n\n        labels = [''] * len(periods_back)\n        for i in range(0, len(periods_back), 7):\n            labels[i] = periods_back[i].start_datetime.strftime('%Y-%m-%d')\n        plt.plot(cumulative_prod_vols,\n                 'b-',\n                 label='{} vol'.format(comparison_input.name1),\n                 alpha=0.5)\n        plt.plot(cumulative_back_vols,\n                 'y-',\n                 label='{} vol'.format(comparison_input.name2),\n                 alpha=0.5)\n        plt.xticks(range(len(labels)), labels, rotation=17)\n        plt.legend()\n        plt.title('Daily cumulative matched vol {}'.format(strategy.get_short_name()))\n        file_path = '%s/%s' % (tmp_dir, \"daily_cumulative_vol_{}.png\".format(strategy.get_short_name()))\n        plt.savefig(file_path)\n        attachments.append(file_path)\n        plt.close()\n\n    res = TestResult(name=\"Daily cumulative vol\",\n                     good_report_message=message,\n                     attachments=attachments)\n    res.success = True\n    return res\n\n\ndef _print_orders(comparison_input, start_datetime, end_datetime):\n    message = \"\"\n    for strategy in comparison_input.strategies:\n        message += \"{}\\n\".format(strategy.get_short_name())\n        message += _get_orders_debug_print(strategy.orders1, strategy.orders2, {},\n                                           strategy.strategy_name, strategy.strategy_desc,\n                                           strategy.strategy_code, strategy.trading_user_id,\n                                           start_datetime, end_datetime,\n                                           name1=comparison_input.name1, name2=comparison_input.name2\n                                           )\n\n    res = TestResult(name=\"Orders\",\n                     good_report_message=message)\n    res.success = True\n    return res\n\n\nclass Report(object):\n    def __init__(self, results, start, end):\n        self.results = results\n        self.simulated_start = start\n        self.simulated_end = end\n        self.messages = []\n\n    def send_email(self, notifiees):\n        subject = \"Weekly report {}_{} {} days\".format(self.simulated_start.strftime('%Y-%m-%d'),\n                                                       self.simulated_end.strftime('%Y-%m-%d'),\n                                                       (self.simulated_end - self.simulated_start).days + 1)\n        body = self._make_email_body()\n        content = body\n        attachments = []\n        for r in self.results:\n            attachments.extend(r.attachments)\n        self.send_report_emails(subject, content, notifiees, attachments)\n\n    def _make_email_body(self):\n        body = \"\"\n        for r in self.results:\n            if \"\" != r.good_report_message:\n                section = \"{}\\n{}:\\n{}\\n\".format(\"-------------\", r.test_name, r.good_report_message)\n                body += \"{}\\n\".format(section)\n\n        if self.messages:\n            body += \"\\nExtra info:\\n\"\n            for m in self.messages:\n                body += \"{}\\n\".format(m)\n        return body\n\n    def send_report_emails(self, 
subject, message, notifiees, attachments):\n        start_date_str = self.simulated_start.strftime('%Y-%m-%d')\n        end_date_str = self.simulated_end.strftime('%Y-%m-%d')\n        num_days = (self.simulated_end - self.simulated_start).days + 1\n        template = Template(\"\"\"\n        Report from {{start_date}} to {{end_date}} - {{num_days}} days\\n\n\n        <pre>\n        \\n{{ message }}\\n\n        </pre>\n        \"\"\")\n        html_ = template.render(start_date=start_date_str, end_date=end_date_str, num_days=num_days, message=message)\n        _send_email_retry('', html_, subject, notifiees, attachments=attachments)\n\n    def add_extra_messages(self, messages):\n        \"\"\"\n        Add extra messages to be sent in the email\n        :param messages:\n        :return:\n        \"\"\"\n        self.messages.extend(messages)\n\n\nclass StrategyInput(object):\n    def __init__(self, strategy_name, strategy_desc, trading_user_id, strategy_code):\n        self.strategy_name = strategy_name\n        self.strategy_desc = strategy_desc\n        self.trading_user_id = trading_user_id\n        self.strategy_code = strategy_code\n        self.orders1 = list()\n        self.orders2 = list()\n        self.daily_periods_1 = list()\n        self.daily_periods_2 = list()\n        self.capitals_serie = None  # Not used when running, only for stats. Can be a time series or a fixed amount\n        self.instructions1 = list()\n        self.instructions2 = list()\n\n    def __str__(self):\n        return \"{} {} {} {}\".format(self.strategy_name, self.strategy_desc, self.trading_user_id, self.strategy_code)\n\n    def get_short_name(self):\n        message = \"{}\".format(self.strategy_name)\n        if self.strategy_desc is not None:\n            message += \" {}\".format(self.strategy_desc)\n        if self.strategy_code is not None:\n            message += \" {}\".format(self.strategy_code)\n        if self.trading_user_id is not None:\n            if self.trading_user_id in TRADING_USER_MAP:\n                message += \" {}\".format(TRADING_USER_MAP[self.trading_user_id])\n            else:\n                message += \" {}\".format(self.trading_user_id)  # unknown id: fall back to the raw value\n        return message\n\n\nclass ComparisonInput(object):\n    def __init__(self, strategies):\n        self.strategies = strategies  # orders in different strategies can contain duplicates\n        self.all_orders1 = []  # must not contain duplicates\n        self.all_orders2 = []  # must not contain duplicates\n        self.name1 = \"\"\n        self.name2 = \"\"\n\n\ndef make_report_automatic_backtesting(start_datetime, end_datetime, strategies, cmd_line, use_cache):\n    \"\"\"\n    Fetch production and automatic_backtest orders then run the comparison\n\n    :param start_datetime:\n    :param end_datetime:\n    :param strategies: type [StrategyInput]\n    :param cmd_line:\n    :param use_cache: use local file caches for fixture ids\n    :return:\n    \"\"\"\n    mongo_helper = persistence.MongoStrategyHelper()\n    tmp_dir = tempfile.mkdtemp(suffix=\"weekly_report\")\n\n    loaded = mongo_helper.get_prod_orders_between_datetimes(start_datetime - dt.timedelta(hours=12),\n                                                            end_datetime + dt.timedelta(hours=12),\n                                                            use_cache=use_cache)\n    # loaded = [o for o in loaded if str(o['trading_user_id']) in TRADING_USER_MAP.keys()]\n    all_backtest_orders = []\n\n    for strategy in strategies:\n        orders = loaded\n        if strategy.strategy_name is not None:\n            orders = [o for o in orders if 'strategy' in o and o['strategy'] == strategy.strategy_name]\n        if strategy.strategy_desc is not None:\n            orders = [o for o in orders if 'strategy_descr' in o and o['strategy_descr'] == strategy.strategy_desc]\n        if strategy.trading_user_id is not None:\n            orders = [o for o in orders if\n                      'trading_user_id' in o and str(o['trading_user_id']) == strategy.trading_user_id]\n        if strategy.strategy_code is not None:\n            orders = [o for o in orders if 
'strategy_code' in o and o['strategy_code'] == strategy.strategy_code]\n strategy.orders1 = orders\n\n instructions_backtest, orders_backtest = mongo_helper.get_backtest_result_multiple_days(\n strategy.strategy_name, strategy.strategy_desc, strategy.trading_user_id, strategy.strategy_code,\n start_datetime.date().strftime('%Y-%m-%d'),\n end_datetime.date().strftime('%Y-%m-%d'),\n 'automatic', 'test_range_1')\n orders_backtest = _get_last_backtest_order_status(orders_backtest)\n strategy.orders2 = orders_backtest\n\n all_backtest_orders.extend(orders_backtest)\n\n comparison_input = ComparisonInput(strategies)\n comparison_input.name1 = 'prod'\n comparison_input.name2 = 'auto_back'\n non_duplicate_ords1 = []\n ords1_keys = set()\n\n for s in strategies:\n for o in s.orders1:\n if o['_id'] not in ords1_keys:\n ords1_keys.add(o['_id'])\n non_duplicate_ords1.append(o)\n comparison_input.all_orders1 = non_duplicate_ords1\n non_duplicate_ords2 = list(np.unique(np.array(all_backtest_orders)))\n comparison_input.all_orders2 = non_duplicate_ords2\n\n report = make_report_comparison(start_datetime, end_datetime, comparison_input, tmp_dir, use_cache)\n _ = cmd_line\n return report\n\n\ndef _run_for_strategy(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees, use_fixture_cache,\n mnemonic='report', action_delay_ms=0, use_spark=False, devpi_user=None, devpi_index=None,\n spark_app_name=None, use_multiprocess=False, n_workers=0, extra_strategy_args=None, capital=None,\n strategy_factory=StrategyFactory, framework_providers=FrameworkHistoricalProviders):\n \"\"\"\n :param action_delay_ms: add this delay to action data\n :return: list of error messages\n \"\"\"\n if extra_strategy_args is None:\n extra_strategy_args = {}\n range_to_run = (start_datetime, end_datetime, {}, 'report_range_1')\n\n just_repopulated_strategies = list()\n config_id_str = None\n errors = []\n\n for strategy in strategies:\n logging.info(\"Runs for {} mnemonic={}\".format(str(strategy), mnemonic))\n logging.info(\"Runs for {}\".format(str(strategy)))\n if strategy.strategy_name in ['Analyst_FTOUG', 'Analyst_FTAHG']:\n _extra_strategy_args = {'is_backtest': True}\n else:\n _extra_strategy_args = {}\n _extra_strategy_args.update(extra_strategy_args)\n\n if strategy.strategy_name in ['Analyst_FTOUG', 'Analyst_FTAHG', 'FFM_FTOUG', 'FFM_FTAHG']:\n _extra_strategy_args.update({'algo_type': 'SimpleAlgo'})\n\n if strategy.strategy_desc is not None:\n strategy_descs = [strategy.strategy_desc]\n else:\n strategy_class = strategy_factory.create_strategy_class_no_cheks(strategy.strategy_name,\n strategy.strategy_desc,\n strategy.strategy_code,\n strategy.trading_user_id,\n True, {})\n strategy_descs = strategy_class.get_valid_strategy_desc(strategy.strategy_name)\n strategy_descs = [o for o in strategy_descs if o != 'all']\n for strategy_desc in strategy_descs:\n trading_user_ids = [strategy.trading_user_id]\n if strategy.trading_user_id is None:\n trading_user_ids = ['562f5bef497aee1c22000001', # :'Algosports',\n '54da2b5fd47e6bff0dade9b4'] # : 'Stratagem',\n\n for trading_user_id in trading_user_ids:\n strategy_codes = [strategy.strategy_code]\n if strategy.strategy_code is None:\n strategy_class = strategy_factory.create_strategy_class_no_cheks(strategy.strategy_name,\n strategy.strategy_desc,\n strategy.strategy_code,\n strategy.trading_user_id,\n True, {})\n strategy_codes = strategy_class.get_valid_strategy_code(strategy.strategy_name, strategy_desc)\n\n for strategy_code in strategy_codes:\n extra_backtest_args 
= {\n 'repopulate': repopulate,\n 'use_fixture_cache': use_fixture_cache,\n 'allocated_capital': capital, # Is not None, use constant capital for every backtest\n 'use_spark': use_spark,\n 'days_after': 1,\n 'save_n_days': 7,\n 'spark_driver_memory': '8g',\n 'spark_executor_memory': '8g',\n 'spark_app_name': \"backt {} {} {} {}_{} {}\".format(\n strategy.strategy_name, strategy_desc, strategy_code,\n start_datetime.date(), end_datetime.date(), mnemonic)\n if spark_app_name is None else spark_app_name,\n 'devpi_user': devpi_user,\n 'devpi_index': devpi_index,\n 'action_delay_ms': action_delay_ms,\n 'use_multiprocess': use_multiprocess,\n 'n_workers': n_workers,\n 'store_signals': False,\n }\n try:\n success = True\n logging.info(\"Running {} {} {} {}\".format(strategy.strategy_name, strategy_desc, strategy_code,\n trading_user_id))\n\n extra_backtest_args_copy = deepcopy(extra_backtest_args)\n if repopulate:\n k = (strategy.strategy_name, strategy_desc, strategy_code, trading_user_id)\n if k in just_repopulated_strategies:\n # Avoid repopulating and running multiple times\n extra_backtest_args_copy.update({'repopulate': False})\n logging.info(\"Strategy {} just repopulated, not running again\".format(str(k)))\n\n config_id = run_backtest_main(strategy.strategy_name, strategy_desc, strategy_code,\n trading_user_id,\n _extra_strategy_args, extra_backtest_args_copy, config_id_str,\n mnemonic, cmd_line, {}, range=range_to_run,\n strategy_factory=strategy_factory,\n framework_providers=framework_providers)\n if repopulate:\n k = (strategy.strategy_name, strategy_desc, strategy_code, trading_user_id)\n just_repopulated_strategies.append(k)\n\n except Exception as e:\n message = \"Could not run {} {} {} {}\\n\".format(strategy.strategy_name, strategy_desc,\n strategy_code, trading_user_id)\n message += \"From {} to {}\\n\".format(range_to_run[0], range_to_run[1])\n message += \"Error is: {}\\n\".format(e.message)\n tb = traceback.format_exc()\n message += \"{}\\n\".format(tb)\n\n logging.error(message)\n logging.error(\"{}\\n\".format(tb))\n subject = \"Error while running for report\"\n _send_email_retry(message, message, subject, notifiees)\n errors.append(message)\n\n logging.info(\"Run {} {} {} {} done\".format(strategy.strategy_name, strategy_desc, strategy_code,\n trading_user_id))\n return errors\n\n\ndef _get_capital_timeserie(strategy_name, strategy_desc, strategy_code, trading_user_id, start_datetime, end_datetime,\n strategy_factory=StrategyFactory):\n strategy_obj, strategy_class = strategy_factory.create_strategy(strategy_name,\n strategy_desc,\n strategy_code,\n trading_user_id,\n True, {})\n sport = strategy_obj.get_sport()\n trading_user_id = trading_user_id\n capitals_series = None\n if capitals_series is None:\n def try_get_capital_timeseries(tr_usr_id, in_sport, str_style, start_bkt, end_bkt, str_name, descr, default):\n try:\n ret = get_capital_timeseries(tr_usr_id, in_sport, str_style, start_bkt, end_bkt, str_name,\n strategy_descr=descr)\n return ret\n except Exception:\n return default\n\n # Get the historical capital time series, updated daily\n capitals_series = {descr: try_get_capital_timeseries(trading_user_id, sport,\n StrategyStyle.to_str(strategy_obj.get_style()),\n start_datetime,\n end_datetime, strategy_name,\n descr, defaultdict(lambda: 0))\n for descr in strategy_obj.strategy_run_ids}\n return capitals_series\n\n\ndef _add_historical_capital_series(strategies, start_datetime, end_datetime, strategy_factory=StrategyFactory):\n for strategy in strategies:\n 
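# Capital allocations are shared across strategy codes, so for each strategy we\n        # expand the (desc, trading_user_id) combinations, fetch one capital series per\n        # desc using the first code only, then sum the allocations by date and pad\n        # missing days with 0.0 so every date of the report range has a value.\n        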
strategy_series = list()\n if strategy.strategy_desc is not None:\n strategy_descs = [strategy.strategy_desc]\n else:\n strategy_class = strategy_factory.create_strategy_class_no_cheks(strategy.strategy_name,\n strategy.strategy_desc,\n strategy.strategy_code,\n strategy.trading_user_id,\n True, {})\n strategy_descs = strategy_class.get_valid_strategy_desc(strategy.strategy_name)\n # Special case for Analyst\n strategy_descs = [d for d in strategy_descs if d != 'all']\n for strategy_desc in strategy_descs:\n trading_user_ids = [strategy.trading_user_id]\n if strategy.trading_user_id is None:\n trading_user_ids = ['562f5bef497aee1c22000001', # :'Algosports',\n '54da2b5fd47e6bff0dade9b4'] # : 'Stratagem',\n\n for trading_user_id in trading_user_ids:\n strategy_codes = [strategy.strategy_code]\n if strategy.strategy_code is None:\n strategy_class = strategy_factory.create_strategy_class_no_cheks(strategy.strategy_name,\n strategy_desc,\n strategy.strategy_code,\n strategy.trading_user_id,\n True, {})\n strategy_codes = strategy_class.get_valid_strategy_code(strategy.strategy_name, strategy_desc)\n # Capital allocation are shared among all strategy codes\n strategy_code = strategy_codes[0]\n s = _get_capital_timeserie(strategy.strategy_name, strategy_desc, strategy_code, trading_user_id,\n start_datetime, end_datetime, strategy_factory=strategy_factory)\n strategy_series.append(s)\n\n # sum all allocations\n all_key_dates = []\n for d in strategy_series:\n for strat, dates in d.iteritems():\n all_key_dates.extend(dates.keys())\n all_key_dates = sorted(list(set(all_key_dates)))\n new_serie = dict()\n for k in all_key_dates:\n if k not in new_serie:\n new_serie[k] = 0.0\n for d in strategy_series:\n for strat, dates in d.iteritems():\n if k in dates:\n new_serie[k] += dates[k]\n\n for date in _daterange(start_datetime.date(), end_datetime.date()):\n if date not in new_serie:\n new_serie[date] = 0.0\n\n strategy.capitals_serie = new_serie\n\n\ndef _load_prod_orders(start_datetime, end_datetime, use_cache):\n _mongo_helper = persistence.MongoStrategyHelper()\n orders = _mongo_helper.get_prod_orders_between_datetimes(start_datetime - dt.timedelta(hours=12),\n end_datetime + dt.timedelta(hours=12),\n use_cache=use_cache)\n orders = [o for o in orders if 'strategy' in o and 'sticker' in o and 'ANTONIS' not in o['strategy']]\n orders = [o for o in orders if str(o['trading_user_id']) in ['54da2b5fd47e6bff0dade9b4',\n '562f5bef497aee1c22000001']]\n\n sports = list(set(sticker_parts_from_sticker(i['sticker']).sport for i in orders))\n fixtures = persistence.fetch_fixtures_ids(start_datetime, end_datetime,\n sports,\n use_cache=use_cache)\n fixture_ids = set(f_id for _, f_id in fixtures)\n orders = [o for o in orders if sticker_parts_from_sticker(o['sticker']).scope[1] in fixture_ids]\n # Apply strategy_code where needed, fetching it from the instruction\n instr_ids_to_query = []\n orders_to_reparse = []\n for o in orders:\n if o['strategy'] in ['tennis_sip_ATP', 'tennis_sip_WTA', 'tennis_sip_v2_ATP', 'tennis_sip_v2_WTA',\n 'tennis_lip_ATP', 'tennis_lip_WTA', 'bball_pbp', 'tennis_tot_games_ATP',\n 'tennis_sip_template_ATP', 'tennis_sip_template_WTA',\n 'tennis_deadball_ensemble']:\n if 'instruction_id' in o:\n instr_ids_to_query.append(o['instruction_id'])\n orders_to_reparse.append(o)\n else:\n o.update({'strategy_code': None})\n instr_ids_to_query = list(set(instr_ids_to_query))\n logging.info(\"Loading {} instructions...\".format(len(instr_ids_to_query)))\n instructions_map = 
_mongo_helper.get_prod_instructions_by_ids(instr_ids_to_query)\n for o in orders_to_reparse:\n strategy_code = None\n if 'instruction_id' in o:\n if str(o['instruction_id']) in instructions_map:\n instruction = instructions_map[str(o['instruction_id'])]\n strategy_code = get_strategy_code_from_instruction(instruction)\n else:\n logging.error(\"Instruction {} not in map for order {}\".format(str(o['instruction_id']), o))\n o.update({'strategy_code': strategy_code})\n return orders\n\n\ndef _load_prod_instructions(start_datetime, end_datetime):\n _mongo_helper = persistence.MongoStrategyHelper()\n instr1 = _mongo_helper.get_prod_instructions_between_datetimes(start_datetime - dt.timedelta(hours=12),\n end_datetime + dt.timedelta(hours=12),\n trading_user_id='562f5bef497aee1c22000001')\n instr2 = _mongo_helper.get_prod_instructions_between_datetimes(start_datetime - dt.timedelta(hours=12),\n end_datetime + dt.timedelta(hours=12),\n trading_user_id='54da2b5fd47e6bff0dade9b4')\n\n instr = [i for i in instr1 + instr2 if 'strategy_descr' in i]\n return instr\n\n\ndef _load_backtest_orders(strategy, start_datetime, end_datetime, mnemonic, extra_params=None,\n range_name='report_range_1', config_id=None, strategy_factory=StrategyFactory):\n strategy_class = strategy_factory.create_strategy_class_no_cheks(strategy.strategy_name, strategy.strategy_desc,\n strategy.strategy_code,\n strategy.trading_user_id,\n True, {})\n\n # just 1 config per strategy_code\n mongo_helper = persistence.MongoStrategyHelper()\n instr = []\n orders = []\n\n if strategy.strategy_desc is not None:\n strategy_descs = [strategy.strategy_desc]\n else:\n strategy_descs = strategy_class.get_valid_strategy_desc(strategy.strategy_name)\n # Special case for Analyst\n strategy_descs = [d for d in strategy_descs if d != 'all']\n\n for strategy_desc in strategy_descs:\n if strategy.trading_user_id is not None:\n trading_user_ids = [strategy.trading_user_id]\n else:\n trading_user_ids = ['562f5bef497aee1c22000001', # :'Algosports',\n '54da2b5fd47e6bff0dade9b4'] # : 'Stratagem',\n\n for trading_user_id in trading_user_ids:\n if strategy.strategy_code is not None:\n strategy_codes = [strategy.strategy_code]\n else:\n strategy_codes = strategy_class.get_valid_strategy_code(strategy.strategy_name, strategy_desc)\n\n for strategy_code in strategy_codes:\n config = strategy_class.get_default_configuration(strategy.strategy_name, strategy_desc,\n strategy_code, trading_user_id)\n if extra_params is not None:\n config.update(extra_params)\n\n if config_id is None:\n config_id = mongo_helper.ensure_configurations(strategy.strategy_name, strategy_desc, strategy_code,\n config)\n\n instructions_fetched, orders_fetched = mongo_helper.get_backtest_result_multiple_days(\n strategy.strategy_name, strategy_desc, trading_user_id, strategy_code,\n start_datetime.date().strftime('%Y-%m-%d'),\n end_datetime.date().strftime('%Y-%m-%d'),\n mnemonic=mnemonic, range_name=range_name, config_id=config_id)\n instr.extend(instructions_fetched)\n orders.extend(orders_fetched)\n\n return instr, orders\n\n\ndef _get_last_backtest_order_status(in_orders):\n \"\"\"\n Only keep the last orderState for every order.\n Assume that 'strategy_desc' has been added to the orders\n :param in_orders: order states as inputs\n :return:\n \"\"\"\n # bet_id is unreliable, since it depends from which day we had run from and to\n\n kept_keys = set()\n kept_orders = list()\n for o in in_orders:\n if o['status'] in [OrderStatus.SETTLED, OrderStatus.CANCELLED]:\n key = 
(o['strategy_desc'], o['sticker'], o['placed_time'], o['size'])\n kept_keys.add(key)\n kept_orders.append(o)\n for o in in_orders:\n if o['status'] not in [OrderStatus.SETTLED, OrderStatus.CANCELLED]:\n key = (o['strategy_desc'], o['sticker'], o['placed_time'], o['size'])\n if key not in kept_keys:\n kept_orders.append(o)\n return kept_orders\n\n\ndef make_report_new_run(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees, use_cache,\n devpi_user=None, devpi_index=None, strategy_factory=StrategyFactory,\n framework_providers=FrameworkHistoricalProviders):\n \"\"\"\n Fetch production and backtesting orders then run the comparison\n\n :param strategies: type [StrategyInput]\n :param start_datetime:\n :param end_datetime:\n :param cmd_line: str, how we called this script\n :param repopulate: if true replace the database runs\n :param notifiees: list of email addresses to be notified for bug only\n :param use_cache: if True use local file cache for fixture_ids and mongo orders\n :return:\n \"\"\"\n\n run_errors = _run_for_strategy(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees, use_cache,\n devpi_user=devpi_user, devpi_index=devpi_index, strategy_factory=strategy_factory,\n framework_providers=framework_providers)\n\n _add_historical_capital_series(strategies, start_datetime, end_datetime, strategy_factory=strategy_factory)\n\n tmp_dir = tempfile.mkdtemp(suffix=\"weekly_report\")\n\n loaded_prod_orders = _load_prod_orders(start_datetime, end_datetime, use_cache)\n loaded_prod_instructions = [] # _load_prod_instructions(start_datetime, end_datetime)\n\n all_backtest_orders = []\n\n for strategy in strategies:\n strategy_class = strategy_factory.create_strategy_class_no_cheks(strategy.strategy_name, strategy.strategy_desc,\n strategy.strategy_code,\n strategy.trading_user_id,\n True, {})\n\n instr = loaded_prod_instructions\n orders = loaded_prod_orders\n if strategy.strategy_name is not None:\n orders = [o for o in orders if 'strategy' in o and o['strategy'] == strategy.strategy_name]\n instr = [o for o in instr if 'strategy' in o and o['strategy'] == strategy.strategy_name]\n if strategy.strategy_desc is not None:\n orders = [o for o in orders if 'strategy_descr' in o and o['strategy_descr'] == strategy.strategy_desc]\n instr = [o for o in instr if 'strategy_descr' in o and o['strategy_descr'] == strategy.strategy_desc]\n if strategy.trading_user_id is not None:\n orders = [o for o in orders if\n 'trading_user_id' in o and str(o['trading_user_id']) == strategy.trading_user_id]\n instr = [o for o in instr if\n 'trading_user_id' in o and str(o['trading_user_id']) == strategy.trading_user_id]\n if strategy.strategy_code is not None:\n orders = [o for o in orders if 'strategy_code' in o and o['strategy_code'] == strategy.strategy_code]\n instr = [o for o in instr if 'strategy_code' in o and o['strategy_code'] == strategy.strategy_code]\n\n # Remove strategy_descs no longer used\n if strategy.strategy_desc is None:\n orders = [o for o in orders if 'strategy_descr' in o and\n o['strategy_descr'] in strategy_class.get_valid_strategy_desc(strategy.strategy_name)]\n instr = [o for o in instr if 'strategy_descr' in o and\n o['strategy_descr'] in strategy_class.get_valid_strategy_desc(strategy.strategy_name)]\n\n strategy.orders1 = orders\n strategy.instructions1 = instr\n instructions_backtest, orders_backtest = _load_backtest_orders(strategy, start_datetime, end_datetime, 'report',\n strategy_factory=strategy_factory)\n\n # Only keep last order status\n 
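# (backtest results contain one record per order state; _get_last_backtest_order_status\n        # keeps, per (strategy_desc, sticker, placed_time, size) key, the terminal\n        # SETTLED/CANCELLED state and drops the superseded intermediate ones)\n        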
orders_backtest = _get_last_backtest_order_status(orders_backtest)\n # 'all' for Analyst is unsupported here for report\n orders_backtest = [o for o in orders_backtest if o['strategy_desc'] != 'all']\n strategy.orders2 = orders_backtest\n\n all_backtest_orders.extend(orders_backtest)\n\n comparison_input = ComparisonInput(strategies)\n comparison_input.name1 = 'prod'\n comparison_input.name2 = 'back'\n non_duplicate_ords1 = []\n ords1_keys = set()\n\n for s in strategies:\n for o in s.orders1:\n if o['_id'] not in ords1_keys:\n ords1_keys.add(o['_id'])\n non_duplicate_ords1.append(o)\n comparison_input.all_orders1 = non_duplicate_ords1\n non_duplicate_ords2 = list(np.unique(np.array(all_backtest_orders)))\n comparison_input.all_orders2 = non_duplicate_ords2\n\n report = make_report_comparison(start_datetime, end_datetime, comparison_input, tmp_dir, use_cache)\n report.add_extra_messages(run_errors)\n report.add_extra_messages([\"Note: the backtesting strategy has been run with the CURRENT configuration, \"\n \"not the historical one\",\n \"Price slippage is computed on order's price, which for certain strategy is \"\n \"worse than market price.\",\n \"Football_analyst historical version has multiple competitions, while the current one \"\n \"has fewer competitions.\"])\n return report\n\n\ndef make_report_new_run_action_delay(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees,\n use_cache, use_spark=False, devpi_user=\"\", devpi_index=\"\", delay_ms=0,\n use_multiprocess=False, n_workers=0, strategy_factory=StrategyFactory):\n \"\"\"\n Compare backtesting with and without action data delays\n\n :param strategies: type [StrategyInput]\n :return:\n \"\"\"\n\n mnemonic2 = 'testing_action_delays_{}'.format(delay_ms)\n run_errors2 = _run_for_strategy(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees,\n use_cache, mnemonic=mnemonic2, action_delay_ms=delay_ms, use_spark=use_spark,\n devpi_user=devpi_user, devpi_index=devpi_index, spark_app_name=mnemonic2,\n use_multiprocess=use_multiprocess, n_workers=n_workers,\n strategy_factory=strategy_factory)\n # run_errors2 = []\n\n mnemonic1 = 'testing_no_action_delays'\n run_errors1 = _run_for_strategy(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees,\n use_cache, mnemonic=mnemonic1, action_delay_ms=0, use_spark=use_spark,\n devpi_user=devpi_user, devpi_index=devpi_index, spark_app_name=mnemonic1,\n use_multiprocess=use_multiprocess, n_workers=n_workers,\n strategy_factory=strategy_factory)\n\n # run_errors1 = []\n\n _add_historical_capital_series(strategies, start_datetime, end_datetime, strategy_factory=strategy_factory)\n tmp_dir = tempfile.mkdtemp(suffix=\"action_delay\")\n\n all_no_delay_orders = []\n all_delay_orders = []\n\n for strategy in strategies:\n # load no delays\n instructions_no_delays, orders_no_delays = _load_backtest_orders(strategy, start_datetime, end_datetime,\n mnemonic1, strategy_factory=strategy_factory)\n orders_no_delays = _get_last_backtest_order_status(orders_no_delays)\n # 'all' for Analyst is unsupported here for report\n orders_no_delays = [o for o in orders_no_delays if o['strategy_desc'] != 'all']\n instructions_no_delays = [o for o in instructions_no_delays if o['strategy_descr'] != 'all']\n strategy.orders1 = orders_no_delays\n strategy.instructions1 = instructions_no_delays\n all_no_delay_orders.extend(orders_no_delays)\n\n # load delays\n instructions_delays, orders_delays = _load_backtest_orders(strategy, start_datetime, end_datetime,\n 
mnemonic2, strategy_factory=strategy_factory)\n orders_delays = _get_last_backtest_order_status(orders_delays)\n # 'all' for Analyst is unsupported here for report\n orders_delays = [o for o in orders_delays if o['strategy_desc'] != 'all']\n instructions_delays = [o for o in instructions_delays if o['strategy_descr'] != 'all']\n strategy.orders2 = orders_delays\n strategy.instructions2 = instructions_delays\n all_delay_orders.extend(orders_delays)\n\n comparison_input = ComparisonInput(strategies)\n comparison_input.name1 = 'no_action_delays'\n comparison_input.name2 = 'action_delays_{}'.format(delay_ms)\n\n non_duplicate_ords1 = list(np.unique(np.array(all_no_delay_orders)))\n comparison_input.all_orders1 = non_duplicate_ords1\n non_duplicate_ords2 = list(np.unique(np.array(all_delay_orders)))\n comparison_input.all_orders2 = non_duplicate_ords2\n\n report = make_report_comparison(start_datetime, end_datetime, comparison_input, tmp_dir, use_cache)\n report.add_extra_messages(run_errors1 + run_errors2)\n report.add_extra_messages([\"Note: the backtesting strategy has been run with the CURRENT configuration, \"\n \"not the historical one\",\n \"Price slippage is computed on order's price, which for certain strategy is \"\n \"worse than market price.\"])\n return report\n\n\ndef make_report_analyst(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees, use_cache,\n use_spark=False, devpi_user=\"\", devpi_index=\"\", use_multiprocess=False, n_workers=0,\n strategy_factory=StrategyFactory):\n \"\"\"\n Compare backtesting with different strategy args for sizing\n\n :param strategies: type [StrategyInput]\n :return:\n \"\"\"\n\n mnemonic1 = 'testing_sizing_flat_0005'\n extra_strategy_args1 = {\n 'type_sizing': 'flat',\n 'fix_size': 0.005\n }\n\n run_errors1 = _run_for_strategy(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees,\n use_cache, mnemonic=mnemonic1, use_spark=use_spark, devpi_user=devpi_user,\n devpi_index=devpi_index, spark_app_name=mnemonic1,\n use_multiprocess=use_multiprocess,\n n_workers=n_workers, extra_strategy_args=extra_strategy_args1,\n strategy_factory=strategy_factory)\n\n mnemonic2 = 'testing_sizing_rich'.format()\n extra_strategy_args2 = {\n # 'sizing': 'flat'\n }\n run_errors2 = _run_for_strategy(start_datetime, end_datetime, strategies, cmd_line, repopulate, notifiees,\n use_cache, mnemonic=mnemonic2, use_spark=use_spark, devpi_user=devpi_user,\n devpi_index=devpi_index, spark_app_name=mnemonic2,\n use_multiprocess=use_multiprocess,\n n_workers=n_workers, extra_strategy_args=extra_strategy_args2,\n strategy_factory=strategy_factory)\n\n # Alternatively, use flat capital allocation setting capital=100000\n _add_historical_capital_series(strategies, start_datetime, end_datetime, strategy_factory=strategy_factory)\n\n tmp_dir = tempfile.mkdtemp(suffix=\"analyst_sizing\")\n\n orders1 = []\n orders2 = []\n\n for strategy in strategies:\n # load 1\n instructions1, orders1 = _load_backtest_orders(strategy, start_datetime, end_datetime, mnemonic1,\n strategy_factory=strategy_factory)\n orders1 = _get_last_backtest_order_status(orders1)\n # 'all' for Analyst is unsupported here for report\n orders1 = [o for o in orders1 if o['strategy_desc'] != 'all']\n instructions1 = [o for o in instructions1 if o['strategy_descr'] != 'all']\n strategy.orders1 = orders1\n strategy.instructions1 = instructions1\n\n # load 2\n instruction2, orders2 = _load_backtest_orders(strategy, start_datetime, end_datetime, mnemonic2,\n 
strategy_factory=strategy_factory)\n        orders2 = _get_last_backtest_order_status(orders2)\n        # 'all' for Analyst is unsupported here for report\n        orders2 = [o for o in orders2 if o['strategy_desc'] != 'all']\n        instruction2 = [o for o in instruction2 if o['strategy_descr'] != 'all']\n        strategy.orders2 = orders2\n        strategy.instructions2 = instruction2\n\n    comparison_input = ComparisonInput(strategies)\n    comparison_input.name1 = mnemonic1\n    comparison_input.name2 = mnemonic2\n\n    # gather orders across all strategies (not just the last one iterated),\n    # as the other make_report_* functions do\n    all_orders1 = [o for s in strategies for o in s.orders1]\n    all_orders2 = [o for s in strategies for o in s.orders2]\n    comparison_input.all_orders1 = list(np.unique(np.array(all_orders1)))\n    comparison_input.all_orders2 = list(np.unique(np.array(all_orders2)))\n\n    report = make_report_comparison(start_datetime, end_datetime, comparison_input, tmp_dir, use_cache)\n    report.add_extra_messages(run_errors1 + run_errors2)\n    return report\n\n\nclass StrategyBacktestRunInfo(object):\n    def __init__(self, strategy_name, strategy_desc, trading_user_id, strategy_code, mnemonic, config_id=None,\n                 name=None, extra_params=None, strategy_factory=StrategyFactory):\n        self.strategy_name = strategy_name\n        self.strategy_desc = strategy_desc\n        self.trading_user_id = trading_user_id  # type string\n        self.strategy_code = strategy_code\n        self.mnemonic = mnemonic\n        self.config_id = config_id  # type string\n        self.orders = list()\n        self.instructions = list()\n        self.capitals_serie = None  # dict fetched from data_api or an int\n        self.name = name\n        self.extra_params = extra_params\n        self._strategy_factory = strategy_factory\n        if name is None:\n            self.name = \"{}_{}_{}_{}_{}\".format(self.strategy_name, self.strategy_desc, self.strategy_code,\n                                                self.mnemonic, self.config_id)\n\n    def load_orders(self, start_date, end_date):\n        if self.strategy_name is not None:\n            strategy = self.make_strategy_input()\n            start_datetime = dt.datetime.combine(start_date, dt.datetime.min.time())\n            end_datetime = dt.datetime.combine(end_date, dt.datetime.max.time())\n            instructions_backtest, orders_backtest = _load_backtest_orders(\n                strategy, start_datetime, end_datetime, self.mnemonic, self.extra_params,\n                strategy_factory=self._strategy_factory)\n\n            # Only keep last order status\n            orders_backtest = _get_last_backtest_order_status(orders_backtest)\n            # 'all' for Analyst is unsupported here for report\n            orders_backtest = [o for o in orders_backtest if o['strategy_desc'] != 'all']\n            strategy.orders2 = orders_backtest\n            self.orders = orders_backtest\n            self.instructions = instructions_backtest\n\n    def make_strategy_input(self):\n        strategy = StrategyInput(self.strategy_name, self.strategy_desc, self.trading_user_id, self.strategy_code)\n        return strategy\n\n\ndef make_report_two_runs(start_date, end_date, strategy_run_info1, strategy_run_info2, use_cache, capital):\n    \"\"\"\n    Fetch one or two different backtest runs and create the report.\n\n    :param strategy_run_info1: type StrategyBacktestRunInfo\n    :param strategy_run_info2: type StrategyBacktestRunInfo\n    :param start_date: type datetime.date\n    :param end_date: type datetime.date\n    :param use_cache: if True use local file cache for fixture_ids and mongo orders\n    :param capital: if not None, use a flat capital instead of the historical series\n    :return:\n    \"\"\"\n\n    # _add_historical_capital_series(strategies, start_datetime, end_datetime, strategy_factory=strategy_factory)\n    start_datetime = dt.datetime.combine(start_date, dt.datetime.min.time())\n    end_datetime = dt.datetime.combine(end_date, dt.datetime.max.time())\n    tmp_dir = tempfile.mkdtemp(suffix=\"weekly_report\")\n\n    # load orders and instructions\n    strategy_run_info1.load_orders(start_date, 
end_date)\n    strategy_run_info2.load_orders(start_date, end_date)\n\n    # a bit hacky: strategy_run_info1 is relative to two different strategies while strategy_input is only one\n    strategy_input = strategy_run_info1.make_strategy_input()\n    if capital is not None:\n        strategy_input.capitals_serie = capital  # flat capital requested by the caller\n    else:\n        _add_historical_capital_series([strategy_input], start_datetime, end_datetime\n                                       # , strategy_factory=strategy_factory\n                                       )\n\n    strategy_input.orders1 = strategy_run_info1.orders\n    strategy_input.orders2 = strategy_run_info2.orders\n    strategy_input.instructions1 = strategy_run_info1.instructions\n    strategy_input.instructions2 = strategy_run_info2.instructions\n\n    comparison_input = ComparisonInput([strategy_input])\n    comparison_input.name1 = strategy_run_info1.name\n    comparison_input.name2 = strategy_run_info2.name\n    comparison_input.all_orders1 = strategy_run_info1.orders\n    comparison_input.all_orders2 = strategy_run_info2.orders\n\n    report = make_report_comparison(start_datetime, end_datetime, comparison_input, tmp_dir, use_cache)\n    return report\n\n\ndef make_report_comparison(start_datetime, end_datetime, comparison_input, tmp_dir, use_cache):\n    \"\"\"\n    Compare two runs of multiple strategies.\n    This method can be used to compare two runs generically; it might be useful to compare two versions of the same\n    strategy in backtest.\n\n    Supports overlapping strategies, e.g. (football_ess, None) collects together\n    (football_ess, LTH) and (football_ess, OU).\n    Returns a Report for a list of StrategyInput.\n\n    :param start_datetime:\n    :param end_datetime:\n    :param comparison_input: type ComparisonInput\n    :param tmp_dir: temporary directory where to store the chart files\n    :param use_cache: if True use local file cache for fixture_ids\n    :return:\n    \"\"\"\n    results = []\n    plt.ioff()\n\n    logging.info(\"Creating daily periods...\")\n    for strategy in comparison_input.strategies:\n        logging.info(strategy.get_short_name())\n        logging.info(comparison_input.name1)\n        strategy.daily_periods_1 = split_by_time_period(strategy.orders1, 1,\n                                                        start_datetime.date(),\n                                                        end_datetime.date(),\n                                                        use_cache=use_cache)\n        logging.info(comparison_input.name2)\n        strategy.daily_periods_2 = split_by_time_period(strategy.orders2, 1,\n                                                        start_datetime.date(),\n                                                        end_datetime.date(),\n                                                        use_cache=use_cache)\n\n    logging.info(\"_get_pnl_distrib_by_period\")\n    result = _get_pnl_distrib_by_period(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n    results.append(result)\n\n    logging.info(\"_get_daily_roi\")\n    result = _get_daily_roi(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n    results.append(result)\n\n    logging.info(\"_print_capital\")\n    result = _print_capital(comparison_input, start_datetime, end_datetime, tmp_dir)\n    results.append(result)\n\n    # logging.info(\"_print_capital_and_missed_volume\")\n    # result = _print_capital_and_missed_volume(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n    # results.append(result)\n\n    # logging.info(\"_get_weekly_roi\")\n    # result = _get_weekly_roi(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n    # results.append(result)\n\n    logging.info(\"_get_main_stats\")\n    result = _get_main_stats(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n    results.append(result)\n\n    logging.info(\"_get_extra_stats\")\n    result = _get_extra_stats(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n    results.append(result)\n\n    # Order by provider, bookmaker and status\n    
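# (each _get_* helper returns a TestResult: its good_report_message becomes one\n    # section of the report email, and its attachments are chart files written to\n    # tmp_dir that get attached to the email; extra sections can be added by\n    # appending more TestResult objects to the results list)\n    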
logging.info(\"_get_order_statuses\")\n result = _get_order_statuses(comparison_input, tmp_dir)\n results.append(result)\n\n logging.info(\"_get_different_events\")\n result = _get_different_events(comparison_input)\n results.append(result)\n\n logging.info(\"_get_order_sources\")\n result = _get_order_sources(comparison_input)\n results.append(result)\n\n # result = _price_slippage(comparison_input)\n # results.append(result)\n #\n # result = _price_slippage_by_venue(comparison_input)\n # results.append(result)\n\n # result = _get_weekly_cumulative_pnl(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n # results.append(result)\n\n logging.info(\"_get_daily_cumulative_pnl\")\n result = _get_daily_cumulative_pnl(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n results.append(result)\n\n logging.info(\"_get_daily_pnl\")\n result = _get_daily_pnl(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n results.append(result)\n\n # logging.info(\"_get_pnl_per_event\")\n # result = _get_pnl_per_event(comparison_input)\n # results.append(result)\n\n # logging.info(\"_get_daily_cumulative_vol\")\n # result = _get_daily_cumulative_vol(comparison_input, start_datetime, end_datetime, tmp_dir, use_cache)\n # results.append(result)\n\n # result = _print_orders(comparison_input, start_datetime, end_datetime)\n # results.append(result)\n #\n # result = _get_pnl_zscore(comparison_input, start_datetime, end_datetime, tmp_dir)\n # results.append(result)\n\n return Report(results, start_datetime, end_datetime)\n\n\ndef compare_two_strategies_and_notify(in_strategy_name1, in_strategy_desc1, in_strategy_code1, in_trading_user_id1,\n mnemonic1, config_id1, extra_params1,\n in_strategy_name2, in_strategy_desc2, in_strategy_code2, in_trading_user_id2,\n mnemonic2, config_id2, extra_params2,\n in_start_date, in_end_date, in_capital, use_cache, notify):\n if in_start_date > in_end_date:\n raise ValueError(\"start_date > end_date {}>{}\".format(in_start_date, in_end_date))\n\n strategy_run_1 = StrategyBacktestRunInfo(in_strategy_name1, in_strategy_desc1, in_trading_user_id1,\n in_strategy_code1, mnemonic1, config_id1, extra_params=extra_params1)\n strategy_run_2 = StrategyBacktestRunInfo(in_strategy_name2, in_strategy_desc2, in_trading_user_id2,\n in_strategy_code2, mnemonic2, config_id2, extra_params=extra_params2)\n\n report = make_report_two_runs(in_start_date, in_end_date, strategy_run_1, strategy_run_2, use_cache, in_capital)\n report.send_email(notify)\n", "repo_name": "andreas-koukorinis/example_nbs", "sub_path": "backtesting/strategy_comparison.py", "file_name": "strategy_comparison.py", "file_ext": "py", "file_size_in_byte": 99557, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "97", "api": [{"api_name": "sgmtradingcore.core.notifications.send_trading_system_email", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.notifications.send_trading_system_email", "line_number": 34, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.REJECTED", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 53, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.FAILED", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 
54, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.CANCELLED", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 55, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 56, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 57, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 58, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 59, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 67, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 77, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.REJECTED", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 80, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.FAILED", "line_number": 82, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 82, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 84, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 84, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.CANCELLED", "line_number": 86, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 136, 
"usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 169, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 227, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 237, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 291, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 291, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 292, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 292, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 294, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 294, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 295, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 295, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 305, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 307, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 329, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 368, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 376, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 377, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 378, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 378, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 380, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 380, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 382, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 397, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 397, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 401, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 401, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 405, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 405, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 406, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 406, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 407, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 407, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 409, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 409, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 411, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 411, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 413, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 415, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 436, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 436, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 438, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 438, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 440, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 440, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 442, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 442, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 453, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 453, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 454, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 454, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 469, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", 
"line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 528, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 532, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 532, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 536, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 536, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 537, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 537, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 538, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 538, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 540, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 540, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 542, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 542, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 544, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 573, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 573, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 577, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 577, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 578, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 578, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 579, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 579, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 581, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 581, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 583, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 583, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 585, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 618, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 632, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 632, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 635, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 635, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 638, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 638, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 642, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 642, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 643, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 643, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 644, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 644, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 646, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 646, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.close", "line_number": 648, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 648, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 650, "usage_type": "call"}, {"api_name": "stratagemdataprocessing.parsing.common.stickers.parse_sticker", "line_number": 670, "usage_type": "call"}, {"api_name": "stratagemdataprocessing.parsing.common.stickers.parse_sticker", "line_number": 671, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 687, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 689, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 732, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 732, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest._get_size", "line_number": 734, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest._get_size_matched", "line_number": 735, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 784, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 799, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 801, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 850, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 867, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 884, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 901, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 903, "usage_type": "call"}, {"api_name": "sgmtradingcore.analytics.comparison.trades_stats.StrategyRunStatsHelper.get_order_pnl", "line_number": 911, "usage_type": "call"}, {"api_name": "sgmtradingcore.analytics.comparison.trades_stats.StrategyRunStatsHelper", "line_number": 911, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 932, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 932, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 933, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 933, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 934, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 935, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 936, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 937, "usage_type": "call"}, {"api_name": "stratagemdataprocessing.parsing.common.stickers.parse_sticker", "line_number": 939, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 941, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 941, "usage_type": "name"}, {"api_name": "stratagemdataprocessing.parsing.common.stickers.parse_sticker", "line_number": 943, "usage_type": "call"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 945, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", 
"line_number": 945, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 962, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 965, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 982, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 987, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 992, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 994, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 996, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1007, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1007, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1011, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1011, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 1015, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1015, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1016, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1016, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1017, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1017, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1019, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1019, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1021, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1021, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 1023, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 1040, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 1045, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 1052, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 1053, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1056, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1056, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1060, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1060, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 1064, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1064, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1065, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1065, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1066, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1066, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1068, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1068, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1070, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1070, "usage_type": "name"}, {"api_name": 
"sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 1072, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 1094, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 1095, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 1108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1114, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 1116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 1150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1156, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 1158, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 1180, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 1181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 1194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1195, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.title", "line_number": 1196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1200, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 1202, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest._get_orders_debug_print", "line_number": 1213, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.TestResult", "line_number": 1220, "usage_type": "call"}, {"api_name": "jinja2.Template", "line_number": 1261, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.config.configurations.TRADING_USER_MAP", "line_number": 1304, "usage_type": "name"}, {"api_name": "sgmtradingcore.strategies.config.configurations.TRADING_USER_MAP", "line_number": 1305, "usage_type": "name"}, {"api_name": "sgmtradingcore.strategies.config.configurations.TRADING_USER_MAP", "line_number": 1307, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.persistence.MongoStrategyHelper", "line_number": 1331, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.persistence", "line_number": 1331, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 1332, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 1334, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 1335, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 1375, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1375, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 1386, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.backtest.FrameworkHistoricalProviders", "line_number": 1386, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 1400, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1401, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1460, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1463, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1469, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.backtest_runner.run_backtest_main", "line_number": 1471, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 1486, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 1489, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 1490, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1495, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 1501, "usage_type": "name"}, {"api_name": "stratagemdataprocessing.data_api.get_capital_timeseries", "line_number": 1513, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.strategy_base.StrategyStyle.to_str", "line_number": 1521, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.strategy_base.StrategyStyle", "line_number": 1521, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 1524, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 
1529, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.persistence.MongoStrategyHelper", "line_number": 1587, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.persistence", "line_number": 1587, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 1588, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 1589, "usage_type": "call"}, {"api_name": "stratagemdataprocessing.parsing.common.stickers.sticker_parts_from_sticker", "line_number": 1595, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.persistence.fetch_fixtures_ids", "line_number": 1596, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.persistence", "line_number": 1596, "usage_type": "name"}, {"api_name": "stratagemdataprocessing.parsing.common.stickers.sticker_parts_from_sticker", "line_number": 1600, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1615, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.get_strategy_code_from_instruction", "line_number": 1622, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 1624, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.persistence.MongoStrategyHelper", "line_number": 1630, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.persistence", "line_number": 1630, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 1631, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 1632, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 1634, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 1635, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 1643, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.persistence.MongoStrategyHelper", "line_number": 1650, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.persistence", "line_number": 1650, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 1707, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 1707, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.CANCELLED", "line_number": 1707, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.SETTLED", "line_number": 1712, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus", "line_number": 1712, "usage_type": "name"}, {"api_name": "sgmtradingcore.core.trading_types.OrderStatus.CANCELLED", "line_number": 1712, "usage_type": "attribute"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 1720, "usage_type": "name"}, {"api_name": "sgmtradingcore.backtesting.backtest.FrameworkHistoricalProviders", "line_number": 1721, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 1741, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 1803, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1803, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 1819, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 1845, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 1877, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1877, "usage_type": "call"}, {"api_name": "numpy.unique", 
"line_number": 1879, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1879, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 1893, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 1928, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 1958, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1958, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 1960, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1960, "usage_type": "call"}, {"api_name": "sgmtradingcore.strategies.realtime.StrategyFactory", "line_number": 1970, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 1990, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1990, "usage_type": "attribute"}, {"api_name": "datetime.datetime.min.time", "line_number": 1990, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 1991, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1991, "usage_type": "attribute"}, {"api_name": "datetime.datetime.max.time", "line_number": 1991, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 2022, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 2022, "usage_type": "attribute"}, {"api_name": "datetime.datetime.min.time", "line_number": 2022, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 2023, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 2023, "usage_type": "attribute"}, {"api_name": "datetime.datetime.max.time", "line_number": 2023, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 2024, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ioff", "line_number": 2072, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 2072, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 2074, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2076, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2077, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 2078, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2082, "usage_type": "call"}, {"api_name": "sgmtradingcore.backtesting.automatic_backtest.split_by_time_period", "line_number": 2083, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2088, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2092, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2096, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2108, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2112, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2117, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2121, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2125, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2138, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 2142, "usage_type": "call"}]} +{"seq_id": "14716113042", "text": "import argparse\nimport json\nimport logging\nfrom pathlib import Path\nfrom rdflib import Graph, BNode\nfrom rdflib.namespace import split_uri\n\nlogging.basicConfig(level=logging.INFO)\n\nparser = argparse.ArgumentParser(\n description=\"Compute the concise bounded 
description for each subject in the input file.\")\nparser.add_argument('--source',\n                    default=Path('data/temp/all.nt'),\n                    type=Path,\n                    help=\"Path to the input file. Default is `data/temp/all.nt`.\")\nparser.add_argument('--output',\n                    help=\"Path to the folder for the output files\",\n                    type=Path,\n                    default=Path('_includes/cbds')\n                    )\nparser.add_argument('--base',\n                    help=\"Base URI of the resources to be considered.\",\n                    default=\"\")\nargs = parser.parse_args()\nall = Graph()\nlogging.info(f\" parsing {args.source} ...\")\nall.parse(args.source)\nlogging.info(\" done.\")\n\nfor subject in all.subjects(unique=True):\n    logging.debug(f\" considering {subject} for cbd computation ...\")\n    try:\n        namespace = str(subject)\n        if subject == args.base:\n            namespace = split_uri(subject)[0]\n        if args.base in namespace:\n            logging.debug(\" ... computing cbd.\")\n            cbd = all.cbd(subject)\n            name = subject.removeprefix(args.base)\n            name = name.replace(\"/\", \"_\")\n            name = f\"_{name}\"\n            cbd_dict = json.loads(cbd.serialize(format=\"json-ld\"))\n            out_path = args.output / name\n            outpath = f\"{out_path}.json\"\n            with open(outpath, 'w') as cbd_file:\n                json.dump(cbd_dict, cbd_file, indent=2)\n    except ValueError:\n        # skip subjects whose URI cannot be split\n        continue\n\n\n\n\n", "repo_name": "berlinonline/lod-berlin-einwohner", "sub_path": "bin/compute_cbds.py", "file_name": "compute_cbds.py", "file_ext": "py", "file_size_in_byte": 1752, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 8, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "rdflib.Graph", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 31, "usage_type": "call"}, {"api_name": "rdflib.namespace.split_uri", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "74608117120", "text": "import requests\nimport os\nfrom config import STRAVA_CLIENT_ID, STRAVA_CLIENT_SECRET, STRAVA_TOKEN_URL\nimport psycopg2\nfrom dotenv import load_dotenv\n\nload_dotenv() # Load environment variables from .env file\nDB_PW = os.getenv('DB_STRAVA_ACCESS_TOKENS_PW')\n\n\ndef create_conn():\n    conn = psycopg2.connect(\n        host='34.133.105.15',\n        dbname='gcp_strava_data',\n        user='postgres',\n        password=DB_PW\n    )\n    return conn\n\ndef refresh_strava_tokens():\n    \"\"\"Refreshes Strava tokens using the refresh token present in the database.\"\"\"\n\n    # Creating a connection to your database\n    conn = create_conn()\n\n    # If connection successful, fetch the refresh token stored in the database\n    with conn.cursor() as cursor:\n        # Query to select all athletes' refresh tokens from your specific table\n        cursor.execute(\"SELECT athlete_id, refresh_token, access_token FROM strava_access_tokens\")\n\n        # Fetch all 
athletes\n athletes = cursor.fetchall()\n\n if not athletes:\n print('No athletes found in the database.')\n return\n\n # For each athlete, refresh their Strava token\n for athlete_id, refresh_token, access_token in athletes:\n\n # Use the refresh_token from the database to get a new access_token\n payload = {\n 'client_id': STRAVA_CLIENT_ID,\n 'client_secret': STRAVA_CLIENT_SECRET,\n 'refresh_token': refresh_token,\n 'grant_type': 'refresh_token'\n }\n\n response = requests.post(STRAVA_TOKEN_URL, params=payload)\n\n # If the request was successful, the new access token, refresh token, and expiration date will be in the response\n if response.status_code == 200:\n new_access_token = response.json()['access_token']\n new_refresh_token = response.json()['refresh_token']\n new_expires_at = response.json()['expires_at']\n new_expires_in = response.json()['expires_in']\n\n # Save these new tokens to your database\n with conn.cursor() as cursor:\n # Check if the old access token and the new access token are the same\n if access_token == new_access_token:\n cursor.execute(\"\"\"\n UPDATE strava_access_tokens\n SET last_updated = now(),\n expires_in = %s,\n total_refresh_checks = total_refresh_checks + 1,\n last_refreshed_by = 'local_script'\n WHERE athlete_id = %s\n \"\"\",\n (new_expires_in, athlete_id))\n print(f'No token update (but updated relevant fields) for athlete {athlete_id}.')\n else:\n cursor.execute(\"\"\"\n UPDATE strava_access_tokens\n SET access_token = %s,\n refresh_token = %s,\n expires_at = %s,\n expires_in = %s,\n last_updated = now(),\n total_refreshes = total_refreshes + 1,\n total_refresh_checks = total_refresh_checks + 1,\n last_refreshed_by = 'local_script'\n WHERE athlete_id = %s\n \"\"\",\n (new_access_token, new_refresh_token, new_expires_at, new_expires_in, athlete_id))\n print(f'Successfully updated tokens for athlete {athlete_id}.')\n # Commit the transaction\n conn.commit()\n\n else:\n print(f'Failed to refresh token for athlete {athlete_id}:', response.json())\n\n\n\nif __name__ == \"__main__\":\n refresh_strava_tokens()", "repo_name": "tillo13/gcp-strava", "sub_path": "local_refresh_strava_tokens.py", "file_name": "local_refresh_strava_tokens.py", "file_ext": "py", "file_size_in_byte": 3780, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "config.STRAVA_CLIENT_ID", "line_number": 43, "usage_type": "name"}, {"api_name": "config.STRAVA_CLIENT_SECRET", "line_number": 44, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "config.STRAVA_TOKEN_URL", "line_number": 49, "usage_type": "argument"}]} +{"seq_id": "11917504207", "text": "# -*- coding:utf-8 -*-\n# author : 丁雪峰\n# time : 2016-08-17 02:07:56\n# email : fengidri@yeah.net\n# version : 1.0.1\n\n\n\nimport os\nimport sys\nimport json\nimport time\n\nj = open('slow_rate.json').read()\ndata = json.loads(j)['data']\n\nkeys = [ int(x) for x in data.keys()]\nkeys.sort()\n\nvalues = [data[str(k)][0][1] for k in keys]\n\ndef time_formatter(timestamp, pos):\n \"show time hour and mins look like: HH:MM\"\n timestamp += 8 * 3600\n\n return time.strftime(\"%H:%M\", time.gmtime(timestamp))\n\n#####################################################\nopt_x = keys\nopt_y = 
values\nopt_title = \"MG\"\nopt_xlabel = \"Time\"\nopt_ylabel = \"SlowRate\"\n#####################################################\n\nimport matplotlib as mpl\nmpl.use(\"Agg\")\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FuncFormatter\n\nplt.figure(figsize=(20, 10.5))\nplt.plot(opt_x, opt_y)\nplt.xlabel(opt_xlabel)\nplt.ylabel(opt_ylabel)\nplt.title(opt_title)\n#plt.ylim()\n#plt.legend()\n\nax = plt.gca()\nax.xaxis.set_major_locator(MultipleLocator(300 * 12))\nax.xaxis.set_major_formatter(FuncFormatter(time_formatter))\nax.xaxis.set_minor_locator( MultipleLocator(300) )\n\nplt.savefig(\"/var/http/slowrate.png\")\n\n\n\n\n\n\n", "repo_name": "fengidri/python-script", "sub_path": "plot/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 1248, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 26, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "158559306", "text": "import spotipy\nimport time\n\n# Get playlist name from the spotify API.\ndef playlist_name(playlist_id: str, token: str) -> dict:\n sp = spotipy.Spotify(auth=token)\n try:\n playlist = sp.playlist(playlist_id, fields=\"name\")\n return playlist\n except Exception as e:\n None\n\n\n# Get playlist items from the spotify API.\ndef get_api_playlist_tracks(playlist_id: str, token: str) -> list:\n sp = spotipy.Spotify(auth=token)\n try:\n playlist = sp.playlist_items(\n playlist_id,\n fields=\"items(added_at,added_by(id),track(artists(name,id),name,id,duration_ms,popularity,album(name)))\",\n )\n seprate_request = [ele for ele in playlist[\"items\"]]\n list_of_dict = []\n for ele in seprate_request:\n added_at = {\"added_at\": ele[\"added_at\"]}\n playlist_name = {\"playlist_name\": ele[\"added_by\"][\"id\"]}\n artist_name = {\n \"artist_name\": [a_dict[\"name\"] for a_dict in ele[\"track\"][\"artists\"]]\n }\n 
artist_id = {\n \"artistID\": [a_dict[\"id\"] for a_dict in ele[\"track\"][\"artists\"]]\n }\n song_name = {\"song_name\": ele[\"track\"][\"name\"]}\n song_id = {\"songID\": ele[\"track\"][\"id\"]}\n album_name = {\"album_name\": ele[\"track\"][\"album\"][\"name\"]}\n popularity = {\"popularity\": ele[\"track\"][\"popularity\"]}\n duration_ms = {\"duration_ms\": ele[\"track\"][\"duration_ms\"]}\n concat_dict = {\n **playlist_name,\n **artist_name,\n **artist_id,\n **song_name,\n **song_id,\n **album_name,\n **popularity,\n **duration_ms,\n **added_at,\n }\n list_of_dict.append(concat_dict)\n return list_of_dict\n except Exception as e:\n return None\n\n\n# Get genre of artits from the spotify API.\ndef get_api_artist(artist_id: str, token: str) -> dict:\n sp = spotipy.Spotify(auth=token)\n try:\n artist_response = sp.artists([artist_id])\n seprate_artist_response = [ele for ele in artist_response[\"artists\"]]\n for ele in seprate_artist_response:\n return ele['genres']\n except Exception as e:\n return None\n\n\n# Get track features from the spotify API.\ndef get_api_features(track_id: str, token: str) -> dict:\n sp = spotipy.Spotify(auth=token)\n try:\n features = sp.audio_features([track_id])\n return features[0]\n except Exception as e:\n return None\n\n\n# Convert miliseconds to minutes and seconds.\ndef convertMillis(millis: int) -> float:\n seconds = millis / 1000\n ty_res = time.gmtime(seconds)\n res = float(time.strftime(\"%M.%S\", ty_res))\n return res", "repo_name": "szymonkrupa/Spotifstory", "sub_path": "features.py", "file_name": "features.py", "file_ext": "py", "file_size_in_byte": 2776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "spotipy.Spotify", "line_number": 6, "usage_type": "call"}, {"api_name": "spotipy.Spotify", "line_number": 16, "usage_type": "call"}, {"api_name": "spotipy.Spotify", "line_number": 57, "usage_type": "call"}, {"api_name": "spotipy.Spotify", "line_number": 69, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 80, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "73926732157", "text": "# -*- coding: utf-8 -*-\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo import models, fields, api, _\nfrom datetime import datetime, time\n\nfrom odoo.addons.resource.models.resource import HOURS_PER_DAY\n\n\n\nclass HrHolidays(models.Model):\n _inherit = \"hr.leave.allocation\"\n\n state = fields.Selection(\n track_visibility=None\n )\n annual_cron_date = fields.Date(\n string=\"Annual Date\"\n )\n\n\n\n # def name_get(self):\n # res = []\n #\n # for leave in self:\n # if leave.annual_cron_date:\n # current_date = fields.Date.from_string(leave.annual_cron_date)\n # else:\n # current_date = fields.Date.from_string(leave.create_date)\n # if current_date:\n # current_month = current_date.strftime(\"%B\")\n # current_year = current_date.year\n #\n # if leave.holiday_status_id.work_entry_type_id.code == 'LEAVE120' and leave.holiday_status_id.name:\n # # name = leave.holiday_status_id.name + ' Allocation - '+ leave.employee_id.name + ' - ' + current_month + ' ' + str(current_year)\n # name = leave.holiday_status_id.name + ' Allocation - ' + current_month + ' ' + str(current_year)\n # res.append((leave.id, name))\n # else:\n # res.append((leave.id, _(\"%s on %s : %.2f day(s)\") % (\n # leave.employee_id.name or leave.category_id.name, leave.holiday_status_id.name,\n # 
leave.number_of_days)))\n # return res\n\n @api.model\n def _create_allocation_request_by_cron(self, employees, leave_type, leave_number):\n\n current_date = fields.Date.from_string(fields.Date.today())\n\n current_month = current_date.strftime(\"%B\")\n current_year = current_date.year\n\n\n\n last_day = current_date + relativedelta(day=1, months=+1, days=-1)\n date_to = last_day.strftime(\"%Y-%m-%d\")\n first_day = current_date + relativedelta(day=1)\n date_from = first_day.strftime(\"%Y-%m-%d\")\n\n for emp in employees.filtered(lambda e: e.contract_id):\n print('---emp----',emp)\n print('---entered for loop---')\n contracts = self.env['hr.contract'].browse()\n print('----cont----',contracts)\n was_on_full_month_leave = False\n\n # worked_days_line_ids = self.env['hr.payslip']._get_worked_day_lines()\n #\n # for line in worked_days_line_ids:\n # if line['work_entry_type_id.code'] == 'WORK100':\n # if line['number_of_days'] == 0.0:\n # was_on_full_month_leave = True\n # break\n\n if not was_on_full_month_leave:\n print('----entered if cond---')\n name = leave_type.name + ' Allocation - ' + emp.name + ' - ' + current_month + ' ' + str(current_year)\n vals = {'name': name,\n 'employee_id': emp.id,\n 'number_of_days': leave_number,\n 'department_id': emp.department_id.id,\n 'holiday_status_id': leave_type.id,\n 'holiday_type': 'employee',\n 'allocation_type': 'accrual',\n 'annual_cron_date': fields.Date.today(),\n }\n\n print('-----leave values-----',vals)\n holiday = self.env['hr.leave.allocation'].create(vals)\n #holiday.action_confirm()\n if holiday.state == 'confirm':\n holiday.action_validate()\n\n # print('----worked_dys_line----',worked_days_line_ids)\n\n\n\n @api.model\n def cron_legal_leave_allocation_request(self):\n\n employees = self.env['hr.employee'].search([])\n print('-------employeee list----',employees)\n\n leave_type = self.env['hr.leave.type'].search([('work_entry_type_id.code', 'ilike', 'LEAVE120')])\n leave_number = 2.5\n self._create_allocation_request_by_cron(employees, leave_type, leave_number)\n\n", "repo_name": "Elhamari/Odoo-Plugins", "sub_path": "ag_annual_leave/models/hr_holiday.py", "file_name": "hr_holiday.py", "file_ext": "py", "file_size_in_byte": 4164, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "odoo.models.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 12, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "odoo.fields.Date", "line_number": 18, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 49, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 49, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 49, "usage_type": "name"}, {"api_name": "odoo.fields.Date.today", "line_number": 49, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 56, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 58, "usage_type": "call"}, {"api_name": "odoo.fields.Date.today", "line_number": 86, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 86, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 86, "usage_type": "name"}, {"api_name": 
"odoo.api.model", "line_number": 46, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 46, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 99, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "38678681315", "text": "import dash_glue42\nfrom dash import Dash, Input, Output, html, dcc, callback_context\nfrom dash.exceptions import PreventUpdate\nimport dash_bootstrap_components as dbc\nimport json\n\n# Loading clients data.\nwith open(\"data/clients.json\", encoding=\"utf-8\") as f:\n clients_data = json.load(f)\n\napp = Dash(__name__,\n external_stylesheets=[dbc.themes.BOOTSTRAP]\n)\nserver = app.server\napp.enable_dev_tools()\n\n# Dropdown option that will be used to leave the current Channel.\nno_channel = { \"label\": \"No Channel\", \"value\": \"\" }\n\ndef update_client_card(client):\n open_card = True\n if client is None:\n client = {}\n open_card = False\n\n return [\n format_client_name(client),\n \"PORTFOLIO VALUE: $ {}\".format(client.get(\"portfolioValue\")),\n client.get(\"email\"),\n client.get(\"phone\"),\n client.get(\"about\"),\n\n # Open collapsable card.\n open_card\n ]\n\n\ndef publish_in_channel(client_id):\n return {\n \"data\": {\n \"clientId\": client_id\n }\n }\n\ndef format_client_name(client):\n first_name = client.get(\"firstName\")\n last_name = client.get(\"lastName\")\n return f\"{first_name} {last_name}\"\n\ndef find_client(client_id):\n for client in clients_data:\n if client[\"id\"] == client_id:\n return client\n return None\n\nchannel_selector = html.Div(id=\"channels-selector\", className=\"w-25\", children=[\n html.Label(\"Select Channel: \"),\n dcc.Dropdown(id=\"channels-list\", clearable=False),\n])\n\nclient_details_card = dbc.Card(dbc.CardBody(\n [\n html.H4(id=\"client-name\"),\n html.Div(id=\"client-portfolio-value\"),\n html.Div(id=\"client-email\"),\n html.Div(id=\"client-phone\"),\n html.P(id=\"client-details\")\n ]\n ))\n\n# Initiate Glue42 (io.Connect) library.\napp.layout = dash_glue42.Glue42(id=\"glue42\", settings={\n \"desktop\": {\n \"config\": {\n \"channels\": True\n }\n }\n}, children=[\n\n # Glue42 functionality.\n dash_glue42.Channels(id=\"g42-channels\"),\n\n # UI\n html.Div(id=\"page-content\", style={ \"padding\": \"10px 15px\" }, children=[\n html.Div(\n className=\"d-flex justify-content-between mb-2\", \n children=[\n html.H1(\"Clients\"),\n channel_selector\n ]),\n\n # Client details card.\n dbc.Collapse(\n id=\"client-collapse\",\n className=\"mb-2\",\n children=client_details_card\n ),\n\n # Clients List\n dbc.ListGroup(\n [dbc.ListGroupItem(id=client[\"id\"], n_clicks=0, action=True, \n children=[\n html.Div(format_client_name(client)),\n html.Div(\"$ {}\".format(client[\"portfolioValue\"])),\n ]) for client in clients_data]\n )\n ])\n])\n\n\n@app.callback(\n Output(\"channels-selector\", \"style\"),\n Input(\"glue42\", \"isEnterprise\")\n)\ndef channels_selector_visibility(isEnterprise):\n show_selector = (isEnterprise is None) or not isEnterprise\n visibility = \"visible\" if show_selector else \"hidden\"\n\n return {\n \"visibility\": visibility\n }\n\n\ndef channels_to_dpd_options(channels):\n if channels is not None:\n options = map(lambda channel: {\n \"label\": channel.get('name'), \"value\": channel.get('name')}, channels)\n return [no_channel] + list(options)\n\n return [no_channel]\n\n\n@app.callback(\n Output(\"channels-list\", \"options\"),\n Input(\"g42-channels\", \"list\")\n)\ndef 
update_channels_list(channels_list):\n \"\"\"Discovering all channels.\"\"\"\n\n return channels_to_dpd_options(channels_list)\n\n\n@app.callback(\n Output(\"g42-channels\", \"join\"),\n Input(\"channels-list\", \"value\"),\n prevent_initial_call=True\n)\ndef join_channel(channel_name):\n \"\"\"Join a channel programmatically.\"\"\"\n\n if channel_name == no_channel[\"value\"]:\n raise PreventUpdate\n\n return {\n \"name\": channel_name\n }\n\n\n@app.callback(\n Output(\"g42-channels\", \"leave\"),\n Input(\"channels-list\", \"value\")\n)\ndef leave_channel(channel_name):\n \"\"\"Leave a channel programmatically.\"\"\"\n\n if channel_name == no_channel[\"value\"]:\n return {}\n\n raise PreventUpdate\n\n\n@app.callback(\n Output(\"g42-channels\", \"publish\"),\n [Input(client[\"id\"], \"n_clicks\") for client in clients_data]\n)\ndef handle_client_clicked(*buttons):\n \"\"\"Publish the selected client to the channel's context.\"\"\"\n\n ctx = callback_context\n if not ctx.triggered:\n raise PreventUpdate\n\n # Button ID is mapped to the client ID.\n client_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\n client = find_client(client_id)\n if client is None:\n raise PreventUpdate\n\n return publish_in_channel(client_id)\n\n\n@app.callback(\n [\n Output(\"client-name\", \"children\"),\n Output(\"client-portfolio-value\", \"children\"),\n Output(\"client-email\", \"children\"),\n Output(\"client-phone\", \"children\"),\n Output(\"client-details\", \"children\"),\n Output(\"client-collapse\", \"is_open\")\n ],\n Input(\"g42-channels\", \"my\")\n)\ndef channel_data_changed(channel):\n print('channel', channel)\n if (channel is None) or (not (\"data\" in channel)) or (channel[\"data\"] is None):\n return update_client_card(None)\n\n client_id = channel[\"data\"].get(\"clientId\")\n client = find_client(client_id)\n\n return update_client_card(client)\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, host=\"localhost\", port=\"8050\")\n", "repo_name": "sguzunov/clients-dash-app", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 11, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 12, "usage_type": "attribute"}, {"api_name": "dash.html.Div", "line_number": 56, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 56, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 57, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 57, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 58, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 58, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 61, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 61, "usage_type": "call"}, {"api_name": "dash.html.H4", "line_number": 63, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 63, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 64, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 64, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 65, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 65, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 66, "usage_type": 
"call"}, {"api_name": "dash.html", "line_number": 66, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 67, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 67, "usage_type": "name"}, {"api_name": "dash_glue42.Glue42", "line_number": 72, "usage_type": "call"}, {"api_name": "dash_glue42.Channels", "line_number": 81, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 84, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 84, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 85, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 85, "usage_type": "name"}, {"api_name": "dash.html.H1", "line_number": 88, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 88, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Collapse", "line_number": 93, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.ListGroup", "line_number": 100, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.ListGroupItem", "line_number": 101, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 103, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 103, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 104, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 104, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 112, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 113, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 134, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 135, "usage_type": "call"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 152, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 144, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 145, "usage_type": "call"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 169, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 160, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 161, "usage_type": "call"}, {"api_name": "dash.callback_context", "line_number": 179, "usage_type": "name"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 181, "usage_type": "name"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 187, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 173, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 174, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 194, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 195, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 196, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 197, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 198, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 199, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "43464718663", "text": "import numpy as np\nimport cv2\nimport os\n\n\ndef video_transfer(src_path, dst_path):\n if not os.path.isfile(src_path):\n print('source file not found')\n exit(0)\n dst_parentdir, dst_filename = os.path.split(dst_path)\n if not os.path.exists(dst_parentdir):\n # make multi level dir\n os.mkdirs(dst_parentdir)\n video_capture = cv2.VideoCapture(src_path)\n fps = video_capture.get(cv2.CAP_PROP_FPS)\n size = (int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n 
frame_count = video_capture.get(cv2.CAP_PROP_FRAME_COUNT)\n\n fourcc_specific = {\n '.mp4': 'MJPG',\n '.avi': 'XVID',\n '.ogv': 'THEO',\n '.flv': 'FLV1',\n '.wmv': 'MJPG',\n '.mkv': '3IVX',\n }\n\n (_, file_ext) = os.path.splitext(dst_filename)\n file_ext = file_ext.lower()\n if file_ext not in fourcc_specific:\n print('dist format not support')\n exit(0)\n fourcc_type = fourcc_specific[file_ext]\n video_writer = cv2.VideoWriter(dst_path, cv2.VideoWriter_fourcc(*fourcc_type), fps, size)\n status, frame = video_capture.read()\n i = 0\n while status:\n i = i + 1\n video_writer.write(frame)\n status, frame = video_capture.read()\n if (i % 100 == 0):\n print('processed', int(i / frame_count * 100), '%')\n video_writer.release()\n video_capture.release()\n", "repo_name": "sptj/optesthesia", "sub_path": "format_converter/video_format_converter.py", "file_name": "video_format_converter.py", "file_ext": "py", "file_size_in_byte": 1412, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.path.isfile", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.mkdirs", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "70315924469", "text": "# coding=utf-8\n# ! 
/usr/bin/env python2\nimport sys\nimport boto3\nimport pyspark\nfrom pyspark.mllib.classification import SVMWithSGD\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.sql import SparkSession\n\nsc = pyspark.SparkContext()\n\n\n# this function connects to the bucket and stores all the json files in an rdd\ndef s3ToRdd():\n    s3 = boto3.resource('s3')\n    bucket = \"oc-datascience-p2\" # name of my bucket\n    bucketObj = s3.Bucket(bucket)\n\n    key_list = []\n\n    spark = SparkSession.builder.getOrCreate()\n    spark.sparkContext.setLogLevel('ERROR')\n\n    for my_bucket_object in bucketObj.objects.filter(Prefix='features/'):\n        if my_bucket_object.key != \"features/\":\n            # put the file name in name\n            name = my_bucket_object.key\n            # put the data in data and convert the bytes to str so it can go into a tuple.\n            # To look like the rdd we can manipulate.\n            response = my_bucket_object.get()\n            data = response['Body'].read().decode(\"utf-8\")\n            key_list.append((name, data))\n\n    rdd = sc.parallelize(key_list)\n    return rdd\n\n\n\n\n# this function finds the breed name from the full name of the image\n# val: the file name (image)\n# return: the breed name\ndef race(val):\n    return '_'.join(val.split('/')[-1].split('_')[:-1])\n\n\n# this function cleans up the format of the input RDD values\n# data: the RDD to be cleaned\n# return: a clean RDD, without spaces, split on \",\"\ndef format_data(data):\n    return list(map(lambda x: float(x), data.strip(\"[]\").replace(' ', '').split(\",\")))\n\n\n# function that returns 1 if a given string is inside another string, otherwise 0.\n# this lets us single out the breed we want to test\n# label: the breed being tested\n# x: the string being tested\n# return: 1 if the label is in x, otherwise 0\ndef put_label(label, x):\n    if (label.lower() in x.lower()):\n        return 1\n    else:\n        return 0\n\n\n# function that filters 2 breeds out of all the breeds\n# x: the string to search in\n# label1: the 1st value to look for in x\n# label2: the 2nd value to look for in x\n# return: true if label1 or label2 exists in the string x\ndef filter_labels(x, label1, label2):\n    return ((label1.lower() in x.lower()) | (label2.lower() in x.lower()))\n\n\n# in the 1 vs 1 case, filters 2 breeds out of all the breeds. 
for example Abyssinian and American_pit_bull_terrier\n# rdd1Label1: the 1st breed to filter in rddToFilter\n# rdd1Label2: the 2nd breed to filter in rddToFilter\n# rddToFilter: the RDD containing all the breeds\n# return: an RDD with the 2 requested breeds.\ndef filter1(rdd1Label1, rdd2Label0, rddToFilter):\n    return rddToFilter.filter(lambda f: filter_labels(f[0], rdd1Label1, rdd2Label0))\n\n\n# in the 1 vs all case, filters on Abyssinian and then sets its label = 1 and the others = 0\n# changes the name of the breed being tested to 1 and all the others to 0\n# rdd1Label1: the breed being tested\n# rddToChange: the RDD in which the breed names will be changed\n# return: a new RDD which, instead of the breed names, contains 1 if it is the breed we are looking for or 0 if it is not.\ndef changeLabel(rdd1Label1, rddToChange):\n    return rddToChange.map(lambda y: (put_label(rdd1Label1, y[0]), y[1]))\n\n\n# This function is the business logic of the program; it runs the whole pipeline\n# sc: pyspark.SparkContext()\n# path:\n# in local mode: path to the files of the images transformed into json\n# in aws mode: pass aws, it uses the \"features\" directory in S3\n# race1: the 1st breed being tested\n# race2:\n# for 1 vs 1: the 2nd breed being tested\n# for 1 vs all: None\n# return: the model's rate of correct classifications\ndef process(sc, path, race1, race2):\n    # 1 Load all the images into an RDD\n    if (path == 'aws'):\n        toutRDD = s3ToRdd()\n    else:\n        toutRDD = sc.wholeTextFiles(path + \"/*.json\")\n\n    # 2 rework the data of the two columns (file name and values)\n    toutRDDPropre = toutRDD.map(lambda x: (race(x[0]), format_data(x[1])))\n\n    # 3\n\n    # 1 vs all: change the name of the breed to 1 and the other breeds to 0\n    if (race2 == 'None'):\n        labelRDD = changeLabel(race1, toutRDDPropre)\n\n    # 1 vs 1: filter the 2 breeds and change the name of race1 to 1 and race2 to 0\n    else:\n        labelRDDFilter = filter1(race1, race2, toutRDDPropre)\n        labelRDD = changeLabel(race1, labelRDDFilter)\n\n    # 4 split into 2 dataframes, training and test\n\n    # 1 vs all\n    apprentissageRDD, testRDD = labelRDD.randomSplit(weights=[0.7, 0.3], seed=20)\n\n    # 5 transform the training RDD into labeled points (1, 0000011 (all the digits))\n    apprentissageLabelPoint = apprentissageRDD.map(lambda x: LabeledPoint(x[0], x[1]))\n\n    # 6 create the model\n    model = SVMWithSGD.train(apprentissageLabelPoint, iterations=100)\n\n    # 7 apply the model to the test sample\n    resultat = testRDD.map(lambda x: (x[0], model.predict(x[1])))\n\n    # 8 compute the precision, i.e. the model's success rate (the percentage of correct classifications)\n    test_accuracy = float(resultat.filter(lambda a: a[0] == a[1]).count()) / float(testRDD.count())\n\n    return test_accuracy\n\n\n# This method lets us save the result in a file named resultat.txt\ndef resultat(path,race1, race2, test_accuracy):\n    if (path == 'aws'):\n        # information for connecting to the aws bucket\n        s3 = boto3.resource('s3')\n        s3c = boto3.client('s3')\n        bucket = \"oc-datascience-p2\" # name of my bucket\n        bucketObj = s3.Bucket(bucket)\n\n        keyJ = \"resultat.txt\"\n        if (race2 == 'None'):\n            data = race1+ ' ---> '+ str(test_accuracy)\n        else:\n            data = race1 + ' - ' + race2 + ' -----> '+ str(test_accuracy)\n\n        s3c.put_object(Body=data, Bucket=bucket, Key=keyJ)\n\n    else:\n        fichier = open(\"resultat.txt\", \"w\")\n        if (race2 == 'None'):\n            fichier.write(\"\\n local 1 vs all\")\n            fichier.write(race1 + '\\t ---> 
\\t' + str(test_accuracy))\n            fichier.write('\\n' + str(test_accuracy))\n            fichier.close()\n\n        else:\n            fichier.write(\"\\n local 1 vs 1\")\n            fichier.write(race1 + ' - \\t' + race2 + ' ---> \\t' + str(test_accuracy))\n            fichier.write('\\n' + str(test_accuracy))\n            fichier.close()\n\n\ndef main(path, race1, race2):\n    test_accuracy = process(sc, path, race1, race2)\n    resultat( path, race1, race2, test_accuracy)\n\n\nif __name__ == '__main__':\n    if (len(sys.argv) == 4):\n        main(*sys.argv[1:4])\n    else:\n        print(\"incorrect number of arguments\")\n        print(\"if you want to run with the S3 data, put aws after the program name, then the 1/2 breeds\")\n        print(\"if you want to run 1 vs all, you must pass None for breed 2\")", "repo_name": "azadehguilly/OC-DataArchitect-RealisezUnApprentissageDistribue", "sub_path": "classificationRaceAws.py", "file_name": "classificationRaceAws.py", "file_ext": "py", "file_size_in_byte": 6997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pyspark.SparkContext", "line_number": 10, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 15, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.getOrCreate", "line_number": 21, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 21, "usage_type": "name"}, {"api_name": "pyspark.mllib.regression.LabeledPoint", "line_number": 130, "usage_type": "call"}, {"api_name": "pyspark.mllib.classification.SVMWithSGD.train", "line_number": 133, "usage_type": "call"}, {"api_name": "pyspark.mllib.classification.SVMWithSGD", "line_number": 133, "usage_type": "name"}, {"api_name": "boto3.resource", "line_number": 148, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 149, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 182, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 183, "usage_type": "attribute"}]} +{"seq_id": "5606947130", "text": "#!/usr/bin/env python\n# coding=utf-8\nfrom mcurl.utils import download_info_client\nfrom mcurl.downloader.file_info import FileInfo\nfrom gevent.queue import Queue\nimport gevent\nimport gevent.pool\nimport gevent.event\nfrom mcurl.downloader.queue_data_types import Classification\nimport logging\nfrom mcurl.downloader.file_downloader import FileDownloader\nfrom mcurl.downloader import DBSession\n__author__ = 'chenfengyuan'\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FilesDownloader:\n    def __init__(self, tasks_str, host, port, max_concurrent, filename=None):\n        self.client = download_info_client.DownloadInfo(host, port)\n        self.tasks_str = tasks_str\n        self.tasks = []\n        self.max_concurrent = max_concurrent\n        self.exit_event = gevent.event.Event()\n        self.filename = filename\n\n    def init_tasks(self):\n        filenames = list()\n        for task_str in self.tasks_str:\n            if task_str.isnumeric():\n                info = self.client.get_info(task_str.encode('utf-8'))\n                obj = FileInfo.create_from_download_info(info, self.filename)\n                if obj.filename not in filenames:\n                    filenames.append(obj.filename)\n            else:\n                if task_str not in filenames:\n                    filenames.append(task_str)\n        file_infos = list(map(FileInfo.get, filenames))\n        \"\"\":type: List[FileInfo]\"\"\"\n        DBSession().expunge_all()\n        gevent.spawn(self.start_server, file_infos)\n        inq = Queue()\n        filename_outq_map = {}\n        \"\"\":type: Dict[str, Queue]\"\"\"\n        filename_info_map = {}\n        
\"\"\":type: Dict[str, FileInfo]\"\"\"\n for filename in filenames:\n filename_outq_map[filename] = Queue()\n for info in file_infos:\n assert isinstance(info, FileInfo)\n assert info.requests, \"%s.request is empty\" % info.filename\n filename_info_map[info.filename] = info\n\n undownload_filenames = list(filenames)\n for _ in range(min(self.max_concurrent, len(undownload_filenames))):\n inq.put((Classification.FILE_FINISHED, ))\n downloading_files = 0\n while undownload_filenames:\n data = inq.get()\n assert data[0] == Classification.FILE_FINISHED\n filename = undownload_filenames.pop(0)\n info = filename_info_map[filename]\n \"\"\":type: FileInfo\"\"\"\n if info.is_finished():\n logger.info(\"%s is finished\", filename)\n continue\n g = gevent.pool.Group()\n obj = FileDownloader(filename_info_map[filename], filename_outq_map[filename], inq, g)\n g.spawn(obj.start)\n downloading_files += 1\n\n while downloading_files:\n data = inq.get()\n assert data[0] == Classification.FILE_FINISHED\n downloading_files -= 1\n self.exit_event.set()\n\n @staticmethod\n def start_server(infos):\n \"\"\"\n :type infos: list[FileInfo]\n \"\"\"\n import zmq.green as zmq\n import zmq.error as zmq_error\n import json\n import gevent\n\n def server():\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n port = None\n start_port = 51130\n end_port = 51140\n for i in range(start_port, end_port):\n try:\n port = socket.bind_to_random_port('tcp://127.0.0.1', min_port=i,\n max_port=i + 1, max_tries=1)\n except zmq_error.ZMQBindError:\n if i == end_port - 1:\n raise\n else:\n continue\n break\n\n def log_print():\n import time\n while True:\n logger.info('')\n logger.info('')\n logger.info('--------------------listen on %s-------------------------------', port)\n logger.info('')\n logger.info('')\n time.sleep(60)\n gevent.spawn(log_print)\n\n while True:\n _ = socket.recv()\n data = []\n for info in infos:\n data.append(dict(filename=info.filename, filesize=info.filesize,\n percentage=sum(info.chunks)/len(info.chunks)))\n data = json.dumps(data)\n socket.send(data.encode('utf-8'))\n return gevent.spawn(server)\n", "repo_name": "chenfengyuan/mcurl", "sub_path": "python3/mcurl/downloader/files_downloader.py", "file_name": "files_downloader.py", "file_ext": "py", "file_size_in_byte": 4567, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "97", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "mcurl.utils.download_info_client.DownloadInfo", "line_number": 21, "usage_type": "call"}, {"api_name": "mcurl.utils.download_info_client", "line_number": 21, "usage_type": "name"}, {"api_name": "gevent.event.Event", "line_number": 25, "usage_type": "call"}, {"api_name": "gevent.event", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mcurl.downloader.file_info.FileInfo.create_from_download_info", "line_number": 33, "usage_type": "call"}, {"api_name": "mcurl.downloader.file_info.FileInfo", "line_number": 33, "usage_type": "name"}, {"api_name": "mcurl.downloader.file_info.FileInfo.get", "line_number": 39, "usage_type": "attribute"}, {"api_name": "mcurl.downloader.file_info.FileInfo", "line_number": 39, "usage_type": "name"}, {"api_name": "mcurl.downloader.DBSession", "line_number": 41, "usage_type": "call"}, {"api_name": "gevent.spawn", "line_number": 42, "usage_type": "call"}, {"api_name": "gevent.queue.Queue", "line_number": 43, "usage_type": "call"}, {"api_name": "gevent.queue.Queue", "line_number": 49, 
"usage_type": "call"}, {"api_name": "mcurl.downloader.file_info.FileInfo", "line_number": 51, "usage_type": "argument"}, {"api_name": "mcurl.downloader.queue_data_types.Classification.FILE_FINISHED", "line_number": 57, "usage_type": "attribute"}, {"api_name": "mcurl.downloader.queue_data_types.Classification", "line_number": 57, "usage_type": "name"}, {"api_name": "mcurl.downloader.queue_data_types.Classification.FILE_FINISHED", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mcurl.downloader.queue_data_types.Classification", "line_number": 61, "usage_type": "name"}, {"api_name": "gevent.pool.Group", "line_number": 68, "usage_type": "call"}, {"api_name": "gevent.pool", "line_number": 68, "usage_type": "attribute"}, {"api_name": "mcurl.downloader.file_downloader.FileDownloader", "line_number": 69, "usage_type": "call"}, {"api_name": "mcurl.downloader.queue_data_types.Classification.FILE_FINISHED", "line_number": 75, "usage_type": "attribute"}, {"api_name": "mcurl.downloader.queue_data_types.Classification", "line_number": 75, "usage_type": "name"}, {"api_name": "zmq.green.Context", "line_number": 90, "usage_type": "call"}, {"api_name": "zmq.green", "line_number": 90, "usage_type": "name"}, {"api_name": "zmq.green.REP", "line_number": 91, "usage_type": "attribute"}, {"api_name": "zmq.green", "line_number": 91, "usage_type": "name"}, {"api_name": "zmq.error.ZMQBindError", "line_number": 99, "usage_type": "attribute"}, {"api_name": "zmq.error", "line_number": 99, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "gevent.spawn", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 123, "usage_type": "call"}, {"api_name": "gevent.spawn", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "39187163949", "text": "import json\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom utility.search_filter import filtering_query\nfrom .serializers import SellerSerializers\nfrom .models import SellerModel\n\n\nclass SellerAPI(APIView):\n\n search_fields = [\"seller_id\"]\n\n\ndef put(self, request, id):\n data = {}\n try:\n seller = SellerModel.objects.filter(pk=id).first()\n except SellerModel.DoesNotExist:\n data[\"success\"] = False\n data[\"msg\"] = \"Record Does not exist\"\n data[\"data\"] = []\n return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)\n\n if request.method == \"PUT\":\n serializer = SellerSerializers(seller, request.data)\n if serializer.is_valid():\n serializer.save()\n data[\"success\"] = True\n data[\"msg\"] = \"Data updated successfully\"\n data[\"data\"] = serializer.data\n return Response(data=data, status=status.HTTP_200_OK)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['DELETE'])\ndef delete(request):\n data = {}\n del_id = json.loads(request.body.decode(\"utf-8\"))\n if \"id\" not in del_id:\n data[\"success\"] = False\n data[\"msg\"] = \"Record ID not provided\"\n data[\"data\"] = []\n return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)\n try:\n seller = SellerModel.objects.filter(seller_id__in=del_id[\"id\"])\n except SellerModel.DoesNotExist:\n data[\"success\"] = False\n data[\"msg\"] = \"Record does not exist\"\n data[\"data\"] = []\n return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)\n\n if request.method == \"DELETE\":\n result = seller.update(deleted=1)\n 
data[\"success\"] = True\n data[\"msg\"] = \"Data deleted successfully.\"\n data[\"deleted\"] = result\n return Response(data=data, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef get(request, id=None):\n query_string = request.query_params\n data = {}\n try:\n if id:\n seller = SellerModel.objects.filter(pk=id, deleted=0)\n else:\n seller = SellerModel.objects.filter(deleted=0)\n\n data[\"total_record\"] = len(seller)\n seller, data = filtering_query(seller, query_string, \"seller_id\", \"SELLER\")\n\n except SellerModel.DoesNotExist:\n data[\"success\"] = False\n data[\"msg\"] = \"Record Does not exist\"\n data[\"data\"] = []\n return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)\n\n if request.method == \"GET\":\n serilizer = SellerSerializers(seller, many=True)\n data[\"success\"] = True\n data[\"msg\"] = \"OK\"\n data[\"data\"] = serilizer.data\n return Response(data=data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef create(request):\n data = {}\n if request.method == \"POST\":\n seller = SellerModel()\n serializer = SellerSerializers(seller, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n data[\"success\"] = True\n data[\"msg\"] = \"Data updated successfully\"\n data[\"data\"] = serializer.data\n return Response(data=data, status=status.HTTP_201_CREATED)\n\n data[\"success\"] = False\n data[\"msg\"] = serializer.errors\n data[\"data\"] = serializer.data\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n\n# ===========================update=================================================\n\n\n@api_view(['POST'])\ndef patch(request, id):\n data = {}\n\n try:\n if id:\n seller = SellerModel.objects.get(pk=id, deleted=0)\n else:\n seller = SellerModel.objects.filter(deleted=0)\n except SellerModel.DoesNotExist:\n data[\"success\"] = False\n data[\"msg\"] = \"Record Does not exist\"\n data[\"data\"] = []\n return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)\n\n if request.method == \"POST\":\n serializer = SellerSerializers(seller, request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n data[\"success\"] = True\n data[\"msg\"] = \"Data updated successfully\"\n data[\"data\"] = serializer.data\n return Response(data=data, status=status.HTTP_200_OK)\n\n data[\"success\"] = False\n data[\"msg\"] = serializer.errors\n data[\"data\"] = []\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n", "repo_name": "DhruvRathod7/dhruv", "sub_path": "Tea_hub/seller/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4569, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 12, "usage_type": "name"}, {"api_name": "models.SellerModel.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "models.SellerModel.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 20, "usage_type": "name"}, {"api_name": "models.SellerModel.DoesNotExist", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 25, "usage_type": "name"}, {"api_name": 
"serializers.SellerSerializers", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 34, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 36, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 36, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 47, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 47, "usage_type": "name"}, {"api_name": "models.SellerModel.objects.filter", "line_number": 49, "usage_type": "call"}, {"api_name": "models.SellerModel.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 49, "usage_type": "name"}, {"api_name": "models.SellerModel.DoesNotExist", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 50, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 54, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 61, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 39, "usage_type": "call"}, {"api_name": "models.SellerModel.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "models.SellerModel.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 70, "usage_type": "name"}, {"api_name": "models.SellerModel.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "models.SellerModel.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 72, "usage_type": "name"}, {"api_name": "utility.search_filter.filtering_query", "line_number": 75, "usage_type": "call"}, {"api_name": "models.SellerModel.DoesNotExist", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 77, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 81, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 81, "usage_type": "name"}, {"api_name": "serializers.SellerSerializers", "line_number": 84, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 88, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 88, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 88, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 64, "usage_type": "call"}, 
{"api_name": "models.SellerModel", "line_number": 95, "usage_type": "call"}, {"api_name": "serializers.SellerSerializers", "line_number": 96, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 103, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 103, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 103, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 108, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 108, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 108, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 91, "usage_type": "call"}, {"api_name": "models.SellerModel.objects.get", "line_number": 119, "usage_type": "call"}, {"api_name": "models.SellerModel.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 119, "usage_type": "name"}, {"api_name": "models.SellerModel.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "models.SellerModel.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 121, "usage_type": "name"}, {"api_name": "models.SellerModel.DoesNotExist", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.SellerModel", "line_number": 122, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 126, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 126, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 126, "usage_type": "name"}, {"api_name": "serializers.SellerSerializers", "line_number": 129, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 136, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 136, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 136, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 141, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 141, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 141, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "13036571098", "text": "# Create your views here.\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.template.defaultfilters import slugify\nfrom django.utils import simplejson\nfrom django.template.loader import render_to_string\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.urlresolvers import reverse\n\nfrom articles.models import Article, ArticleCategory, Author, ArticleImage\nfrom articles.forms import ArticleForm\n\ndef list_articles(request, template=\"article/article-home.html\"):\n data = {}\n ids=[]\n\n featured_article = Article.objects.filter(is_featured = True).order_by('?')[:1]\n for a in featured_article:ids.append(a.id)\n featured_articles = Article.objects.filter(is_featured = True).exclude(id__in = ids).order_by('-id')\n articles = Article.objects.filter(is_featured = False)\n \n 
data['featured_article'] = featured_article\n data['featured_articles'] = featured_articles\n data['articles'] = articles\n \n return TemplateResponse(request, template, data)\n\ndef article_details(request,slug,template=\"article/article-details.html\"):\n \n article = Article.objects.get(slug = slug)\n data = {}\n data['article'] = article\n articles = Article.objects.all().exclude(id__in = [article.id]).order_by('?')[:4]\n data['articles'] =articles\n return TemplateResponse(request, template, data)\n\ndef add_article(request, template=\"article/add-article.html\"):\n data = {}\n try:\n aid = request.REQUEST.get('aid')\n article = Article.objects.get(id = aid)\n data['article'] = article\n form = ArticleForm(instance = article)\n except:\n article = False\n form = ArticleForm()\n if request.POST:\n if article:form = ArticleForm(request.POST,instance = article)\n else:form = ArticleForm(request.POST)\n \n if form.is_valid():\n savearticleform = form.save(commit=False)\n savearticleform.slug = slugify(savearticleform.title)\n savearticleform.save()\n cover_image_id = request.POST.get('cover_image',False)\n if cover_image_id:\n cover_image = ArticleImage.objects.get(id = int(cover_image_id))\n savearticleform.images.add(cover_image)\n \n photo_ids = request.POST.getlist('images',[])\n is_featured = request.POST.get('is_featured')\n if is_featured == 'True':\n savearticleform.is_featured = True\n else:\n savearticleform.is_featured = False\n if photo_ids:\n for photoid in photo_ids:\n image = ArticleImage.objects.get(id = int(photoid))\n savearticleform.images.add(image)\n savearticleform.save()\n return HttpResponseRedirect(reverse('list_articles'))\n \n data['form'] = form\n return TemplateResponse(request, template, data)\n\ndef delete_article(request):\n aid = request.REQUEST.get('aid')\n article = Article.objects.get(id = aid)\n article.delete()\n return HttpResponseRedirect(reverse('list_articles'))\n \ndef delete_image(request):\n data={}\n id = request.REQUEST.get('id')\n image = ArticleImage.objects.get(id = id)\n image.delete()\n \n data['status'] = 1\n return HttpResponse(simplejson.dumps(data))\n\n@csrf_exempt\ndef upload_article_image(request):\n data = {}\n sdata = {}\n file = request.FILES['articleimage']\n iscover = request.REQUEST.get('cover',False)\n try:\n image = ArticleImage()\n image.image = file\n if iscover:image.cover_image = True\n image.save()\n sdata['image'] = image\n if iscover:sdata['cover_image'] = True\n data['html']=render_to_string('article/include-article-image.html',sdata,context_instance=RequestContext(request))\n if iscover:data['cover'] = 1\n except:\n pass\n data['status'] = 1\n return HttpResponse(simplejson.dumps(data))\n\n######################################################ARTICLE CATEGORIES VIEWS START################################################\ndef categories(request, template=\"article/categories.html\"):\n data = {}\n categories = ArticleCategory.objects.all()\n data['categories'] = categories\n return TemplateResponse(request, template, data)\n\n@csrf_exempt\ndef add_category(request):\n data = {}\n sdata = {}\n id = request.REQUEST.get('catid',False)\n if id:\n category = ArticleCategory.objects.get(id = int(id))\n if not request.POST:data['category'] = category\n \n if request.method=='POST':\n if not id:\n category = ArticleCategory()\n category.name = name = request.POST.get('name')\n category.slug = slugify(name)\n category.save()\n categories = ArticleCategory.objects.all()\n sdata['categories'] = categories\n 
data['html']=render_to_string('article/include-article-categories.html',sdata,context_instance=RequestContext(request))\n data['status'] = 1\n return HttpResponse(simplejson.dumps(data))\n #else:return HttpResponse('0')\n return render_to_response('article/add-category.html',data,context_instance=RequestContext(request))\n\ndef delete_category(request):\n data = {}\n sdata = {}\n id = request.GET.get('id',False)\n \n category = ArticleCategory.objects.get(id = int(id))\n category.delete()\n data['status'] = 1\n return HttpResponse(simplejson.dumps(data))\n\n######################################################ARTICLE CATEGORIES VIEWS END################################################\n######################################################ARTICLE AUTHOR VIEWS START################################################\n\ndef authors(request, template=\"article/authors.html\"):\n data = {}\n authors = Author.objects.all()\n data['authors'] = authors\n return TemplateResponse(request, template, data)\n\n@csrf_exempt\ndef add_author(request):\n data = {}\n sdata = {}\n id = request.REQUEST.get('authid',False)\n if id:\n author = Author.objects.get(id = int(id))\n if not request.POST:data['author'] = author\n \n if request.method=='POST':\n if not id:\n author = Author()\n author.name = name = request.POST.get('name')\n author.slug = slugify(name)\n author.save()\n authors = Author.objects.all()\n sdata['authors'] = authors\n data['html']=render_to_string('article/include-article-authors.html',sdata,context_instance=RequestContext(request))\n data['status'] = 1\n return HttpResponse(simplejson.dumps(data))\n #else:return HttpResponse('0')\n return render_to_response('article/add-author.html',data,context_instance=RequestContext(request))\n\n\ndef delete_author(request):\n data = {}\n sdata = {}\n id = request.GET.get('id',False)\n \n author = Author.objects.get(id = int(id))\n author.delete()\n data['status'] = 1\n return HttpResponse(simplejson.dumps(data))\n\n######################################################ARTICLE AUTHOR VIEWS END################################################ \n \n \n", "repo_name": "sandeeprao6289/SampleArticleProject", "sub_path": "articles/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7289, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "articles.models.Article.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "articles.models.Article.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 19, "usage_type": "name"}, {"api_name": "articles.models.Article.objects.filter", "line_number": 21, "usage_type": "call"}, {"api_name": "articles.models.Article.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 21, "usage_type": "name"}, {"api_name": "articles.models", "line_number": 22, "usage_type": "name"}, {"api_name": "articles.models.Article.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "articles.models.Article.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 22, "usage_type": "name"}, {"api_name": "articles.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "articles.models.Article.objects.get", "line_number": 32, "usage_type": 
"call"}, {"api_name": "articles.models.Article.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 32, "usage_type": "name"}, {"api_name": "articles.models", "line_number": 35, "usage_type": "name"}, {"api_name": "articles.models.Article.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "articles.models.Article.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 35, "usage_type": "name"}, {"api_name": "articles.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "articles.models.Article.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "articles.models.Article.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 43, "usage_type": "name"}, {"api_name": "articles.forms.ArticleForm", "line_number": 45, "usage_type": "call"}, {"api_name": "articles.forms.ArticleForm", "line_number": 48, "usage_type": "call"}, {"api_name": "articles.forms.ArticleForm", "line_number": 50, "usage_type": "call"}, {"api_name": "articles.forms.ArticleForm", "line_number": 51, "usage_type": "call"}, {"api_name": "django.template.defaultfilters.slugify", "line_number": 55, "usage_type": "call"}, {"api_name": "articles.models.ArticleImage.objects.get", "line_number": 59, "usage_type": "call"}, {"api_name": "articles.models.ArticleImage.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "articles.models.ArticleImage", "line_number": 59, "usage_type": "name"}, {"api_name": "articles.models.ArticleImage.objects.get", "line_number": 70, "usage_type": "call"}, {"api_name": "articles.models.ArticleImage.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "articles.models.ArticleImage", "line_number": 70, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 73, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 73, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 76, "usage_type": "call"}, {"api_name": "articles.models.Article.objects.get", "line_number": 80, "usage_type": "call"}, {"api_name": "articles.models.Article.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 80, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 82, "usage_type": "call"}, {"api_name": "articles.models.ArticleImage.objects.get", "line_number": 87, "usage_type": "call"}, {"api_name": "articles.models.ArticleImage.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "articles.models.ArticleImage", "line_number": 87, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 91, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 91, "usage_type": "name"}, {"api_name": "articles.models.ArticleImage", "line_number": 100, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 106, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 106, "usage_type": "call"}, {"api_name": 
"django.http.HttpResponse", "line_number": 111, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 111, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 111, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 93, "usage_type": "name"}, {"api_name": "articles.models.ArticleCategory.objects.all", "line_number": 116, "usage_type": "call"}, {"api_name": "articles.models.ArticleCategory.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "articles.models.ArticleCategory", "line_number": 116, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 118, "usage_type": "call"}, {"api_name": "articles.models.ArticleCategory.objects.get", "line_number": 126, "usage_type": "call"}, {"api_name": "articles.models.ArticleCategory.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "articles.models.ArticleCategory", "line_number": 126, "usage_type": "name"}, {"api_name": "articles.models.ArticleCategory", "line_number": 131, "usage_type": "call"}, {"api_name": "django.template.defaultfilters.slugify", "line_number": 133, "usage_type": "call"}, {"api_name": "articles.models.ArticleCategory.objects.all", "line_number": 135, "usage_type": "call"}, {"api_name": "articles.models.ArticleCategory.objects", "line_number": 135, "usage_type": "attribute"}, {"api_name": "articles.models.ArticleCategory", "line_number": 135, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 137, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 137, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 139, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 139, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 139, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 141, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 141, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 120, "usage_type": "name"}, {"api_name": "articles.models.ArticleCategory.objects.get", "line_number": 148, "usage_type": "call"}, {"api_name": "articles.models.ArticleCategory.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "articles.models.ArticleCategory", "line_number": 148, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 151, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 151, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 151, "usage_type": "name"}, {"api_name": "articles.models.Author.objects.all", "line_number": 158, "usage_type": "call"}, {"api_name": "articles.models.Author.objects", "line_number": 158, "usage_type": "attribute"}, {"api_name": "articles.models.Author", "line_number": 158, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 160, "usage_type": "call"}, {"api_name": "articles.models.Author.objects.get", "line_number": 168, "usage_type": "call"}, {"api_name": "articles.models.Author.objects", "line_number": 168, "usage_type": "attribute"}, {"api_name": "articles.models.Author", "line_number": 168, "usage_type": "name"}, {"api_name": "articles.models.Author", "line_number": 173, "usage_type": "call"}, {"api_name": 
"django.template.defaultfilters.slugify", "line_number": 175, "usage_type": "call"}, {"api_name": "articles.models.Author.objects.all", "line_number": 177, "usage_type": "call"}, {"api_name": "articles.models.Author.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "articles.models.Author", "line_number": 177, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 179, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 179, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 181, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 181, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 181, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 183, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 183, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 162, "usage_type": "name"}, {"api_name": "articles.models.Author.objects.get", "line_number": 191, "usage_type": "call"}, {"api_name": "articles.models.Author.objects", "line_number": 191, "usage_type": "attribute"}, {"api_name": "articles.models.Author", "line_number": 191, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 194, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 194, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 194, "usage_type": "name"}]} +{"seq_id": "580676796", "text": "from datetime import datetime\n\nimport dataset\n\n\ndef sqlite(database, table, metadata=None):\n db = dataset.connect(\"sqlite:///db/\" + database)\n\n if metadata is not None:\n metatable = db.get_table(\"_metadata\")\n metatable.insert(dict(metadata, table=table))\n\n return db.get_table(table, primary_id=\"id_str\", primary_type=\"String\")\n", "repo_name": "tekhnus/misc", "sub_path": "drafts/twitterology/twitterology/sources.py", "file_name": "sources.py", "file_ext": "py", "file_size_in_byte": 351, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "dataset.connect", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "23704844802", "text": "import pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\nmodel = LogisticRegression()\n\ndf = pd.read_csv('titanic.csv')\ndf['male'] = df['Sex'] == 'male'\nX = df[['Pclass', 'male', 'Age', 'Siblings/Spouses', 'Parents/Children', 'Fare']].values\ny = df['Survived'].values\n\nX_test, y_test, X_train, y_train = train_test_split(X, y, random_state=27)\nprint(X_train)\nprint(X_test)", "repo_name": "NobuyaN/DataManipulation", "sub_path": "train_test_data.py", "file_name": "train_test_data.py", "file_ext": "py", "file_size_in_byte": 440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "7407081679", "text": "import os\nimport shutil\nimport distutils.command.clean\nfrom pathlib import Path\n\nfrom setuptools import setup, 
find_namespace_packages\nfrom ml.shutil import run as sh\nfrom ml import logging\n\ndef write_version_py(path, major=None, minor=None, patch=None, suffix='', sha='Unknown'):\n if major is None or minor is None or patch is None:\n major, minor, patch = sh(\"git tag --sort=taggerdate | tail -1\")[1:].split('.')\n sha = sh(\"git rev-parse HEAD\")\n logging.info(f\"Build version {major}.{minor}.{patch}-{sha}\")\n\n path = Path(path).resolve()\n pkg = path.name\n PKG = pkg.upper()\n version = f'{major}.{minor}.{patch}{suffix}'\n if os.getenv(f'{PKG}_BUILD_VERSION'):\n assert os.getenv(f'{PKG}_BUILD_NUMBER') is not None\n build_number = int(os.getenv(f'{PKG}_BUILD_NUMBER'))\n version = os.getenv(f'{PKG}_BUILD_VERSION')\n if build_number > 1:\n version += '.post' + str(build_number)\n elif sha != 'Unknown':\n version += '+' + sha[:7]\n\n import time\n content = f\"\"\"# GENERATED VERSION FILE\n# TIME: {time.asctime()}\n__version__ = {repr(version)}\ngit_version = {repr(sha)}\n\n#from ml import _C\n#if hasattr(_C, 'CUDA_VERSION'):\n# cuda = _C.CUDA_VERSION\n\"\"\"\n\n with open(path / 'version.py', 'w') as f:\n f.write(content)\n \n return version\n\n\ndef dist_info(pkgname):\n try:\n return get_distribution(pkgname)\n except DistributionNotFound:\n return None\n\n\nclass Clean(distutils.command.clean.clean):\n def run(self):\n import glob\n import re\n with open('.gitignore', 'r') as f:\n ignores = f.read()\n pat = re.compile(r'^#( BEGIN NOT-CLEAN-FILES )?')\n for wildcard in filter(None, ignores.split('\\n')):\n match = pat.match(wildcard)\n if match:\n if match.group(1):\n # Marker is found and stop reading .gitignore.\n break\n # Ignore lines which begin with '#'.\n else:\n for filename in glob.glob(wildcard):\n print(f\"removing {filename} to clean\")\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\t\n # It's an old-style class in Python 2.7...\n distutils.command.clean.clean.run(self)\n\n\ndef readme():\n with open('README.md', encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nif __name__ == '__main__':\n namespaces = ['ml']\n packages = find_namespace_packages(include=['ml.*'], exclude=['ml.csrc', 'ml.csrc.*'])\n for pkg in packages:\n version = write_version_py(pkg.replace('.', '/'))\n\n cwd = Path(__file__).parent\n name = sh('basename -s .git `git config --get remote.origin.url`').upper()\n setup(\n name=name,\n version=version,\n author='Farley Lai;Deep Patel',\n url='https://gitlab.com/necla-ml/ML-WS',\n description=f\"Supporting library for web services\",\n long_description=readme(),\n license='BSD-3',\n packages=namespaces + packages,\n cffi_modules=[\"ml/csrc/build.py:ffi\"],\n setup_requires=[\"cffi>=1.0.0\"],\n install_requires=[\"cffi>=1.0.0\"],\n zip_safe=False,\n cmdclass=dict(clean=Clean)\n )", "repo_name": "necla-ml/ML-WS", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 3464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "97", "api": [{"api_name": "ml.shutil.run", "line_number": 12, "usage_type": "call"}, {"api_name": "ml.shutil.run", "line_number": 13, "usage_type": "call"}, {"api_name": "ml.logging.info", "line_number": 14, "usage_type": "call"}, {"api_name": "ml.logging", "line_number": 14, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 21, "usage_type": 
"call"}, {"api_name": "os.getenv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "time.asctime", "line_number": 31, "usage_type": "call"}, {"api_name": "distutils.command.clean.command", "line_number": 53, "usage_type": "attribute"}, {"api_name": "distutils.command.clean", "line_number": 53, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 59, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 68, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 71, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 73, "usage_type": "call"}, {"api_name": "distutils.command.clean.command.clean.clean.run", "line_number": 76, "usage_type": "call"}, {"api_name": "distutils.command.clean.command", "line_number": 76, "usage_type": "attribute"}, {"api_name": "distutils.command.clean", "line_number": 76, "usage_type": "name"}, {"api_name": "setuptools.find_namespace_packages", "line_number": 87, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 91, "usage_type": "call"}, {"api_name": "ml.shutil.run", "line_number": 92, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "26524685768", "text": "from django.conf.urls import patterns, url,include\n \nfrom school import views\n \nurlpatterns = patterns('school.views',\n # ex: /polls/ \n \n \n \n \turl(r'^$','index'),\n url(r'^login/$','login'),\n\turl(r'^result/$','lottery'),\n\turl(r'^id_s/$','id_s'),\n\turl(r'^new_member/$','new_member'),\n\turl(r'^delete_member/$','delete_member'),\n\turl(r'^gift/$','gift'),\n\turl(r'^giver/$','giver'),\n\n\n\n\n\n \n \n)", "repo_name": "midmars/gitf_choice", "sub_path": "htman/school/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 409, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "7911271059", "text": "from tensorflow import keras\nimport tensorflow as tf\nimport cv2\nimport numpy as np\nfrom preprocess_input import preprocess_input,gamma_correction\nfrom loading_model import Singleton_model,Singleton_MTCNN\nimport logging\n\ndef face_verification(chunk_path,known_embedding):\n detector = Singleton_MTCNN.getInstance()\n vs = cv2.VideoCapture(str(chunk_path))\n read=0 #frame reading counter\n matched=0 #matched counter\n procedded_fram=0 # the number of frame which is procedded\n\n # loop over some frames\n while True:\n # grab the frame from the threaded video stream \n (grabbed,frame) = vs.read()\n read += 1\n\n #When the video ends \n if not grabbed:\n if procedded_fram:\n match_presentage=(matched)/(procedded_fram)\n if match_presentage>.8:\n return(True)\n return(False)\n\n # check to see if we should 
process this frame\n        if read % 15 == 0:\n            # detect any face in the frame\n            face = detector.detect_faces(frame)\n\n            # if any detection has confidence more than 95%\n            if len(face):\n                if face[0]['confidence']>.95:\n                    x1, y1, width, height = face[0]['box']\n                    x2, y2 = x1 + width, y1 + height\n                    #extracting the face \n                    face=frame[y1:y2, x1:x2]\n                    # ensuring that the face has a real shape, not a dummy face\n                    if (face.shape[0]>0 and face.shape[1]>0):\n                        candidate_embedding=embedding_calculating(face)\n                        flag=is_match(known_embedding,candidate_embedding)\n                        procedded_fram+=1\n                        if flag:\n                            matched+=1\n                        \n\"\"\" \ntake an array of reference embeddings and the current embedding\nand return whether they match or not\n\"\"\"\ndef is_match(known_embedding, candidate_embedding, thresh=.6):\n    match=0\n    matched=False\n    score = tf.norm(known_embedding- candidate_embedding, axis=1)\n    logging.info(score)\n    print(score)\n    if score <= thresh:\n        matched=True\n    return(matched)\n\n\"\"\"\ntake an image and return the embedding of that image\n\"\"\"\ndef embedding_calculating(img):\n    img=gamma_correction(img)\n    img=cv2.resize(img,(224, 224))\n    img_arr=np.asarray(img,dtype=np.float64)\n    norm_img=preprocess_input(img_arr)\n    tens_img= tf.convert_to_tensor(cv2.resize(norm_img,(224,224)))\n    model=Singleton_model.getInstance()\n    # model=keras.models.load_model('./resnet50_triplet_loss_2048.h5', custom_objects={'tf': tf},compile=False)\n    embedding=tf.math.l2_normalize(model.predict(np.expand_dims(tens_img, axis=0)), axis=-1)\n    return(embedding)\n\n# \"\"\"\n# this function is taken from keras_vggface; we need to rebuild it in tensorflow v2.4 instead of version 1\n# \"\"\"\n# def preprocess_input(x, data_format=None, version=1):\n#     x_temp = np.copy(x)\n#     K = tf.keras.backend\n#     if data_format is None:\n#         data_format = K.image_data_format()\n#     assert data_format in {'channels_last', 'channels_first'}\n#     if version == 1:\n#         if data_format == 'channels_first':\n#             x_temp = x_temp[:, ::-1, ...]\n#             x_temp[:, 0, :, :] -= 93.5940\n#             x_temp[:, 1, :, :] -= 104.7624\n#             x_temp[:, 2, :, :] -= 129.1863\n#         else:\n#             x_temp = x_temp[..., ::-1]\n#             x_temp[..., 0] -= 93.5940\n#             x_temp[..., 1] -= 104.7624\n#             x_temp[..., 2] -= 129.1863\n#     elif version == 2:\n#         if data_format == 'channels_first':\n#             x_temp = x_temp[:, ::-1, ...]\n#             x_temp[:, 0, :, :] -= 91.4953\n#             x_temp[:, 1, :, :] -= 103.8827\n#             x_temp[:, 2, :, :] -= 131.0912\n#         else:\n#             x_temp = x_temp[..., ::-1]\n#             x_temp[..., 0] -= 91.4953\n#             x_temp[..., 1] -= 103.8827\n#             x_temp[..., 2] -= 131.0912\n#     else:\n#         raise NotImplementedError\n#     return x_temp", "repo_name": "Leon-OnlineLearning/face_verification", "sub_path": "exam/face_verification.py", "file_name": "face_verification.py", "file_ext": "py", "file_size_in_byte": 4074, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "loading_model.Singleton_MTCNN.getInstance", "line_number": 10, "usage_type": "call"}, {"api_name": "loading_model.Singleton_MTCNN", "line_number": 10, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "preprocess_input.gamma_correction", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 70, "usage_type": "call"}, {"api_name": 
"numpy.float64", "line_number": 70, "usage_type": "attribute"}, {"api_name": "preprocess_input.preprocess_input", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 72, "usage_type": "call"}, {"api_name": "loading_model.Singleton_model.getInstance", "line_number": 73, "usage_type": "call"}, {"api_name": "loading_model.Singleton_model", "line_number": 73, "usage_type": "name"}, {"api_name": "tensorflow.math.l2_normalize", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "31386551364", "text": "from django.shortcuts import render,redirect\nfrom.forms import NewUserForm\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import authenticate\nfrom sklearn.preprocessing import LabelEncoder\nfrom imblearn.over_sampling import RandomOverSampler\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n# Create your views here.\n\ndef index(request):\n return render(request,'index.html')\n\ndef about(request):\n return render(request,'about.html')\n\n\ndef register(request):\n if request.method == 'POST':\n form = NewUserForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request,'Registeration Sucessufull.')\n return redirect(\"login\")\n messages.error(\n request, \"Unsuccessful rregistraion\"\n \n )\n form = NewUserForm()\n return render(request=request, template_name='register.html', context={'register_form': form})\n\n\n# login page\n\ndef login(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n messages.info(request, f\"You are now logged in as {username}.\")\n return redirect(\"userhome\")\n else:\n messages.error(request, \"Invalid username or password.\")\n else:\n messages.error(request, \"Invalid username or password.\")\n form = AuthenticationForm()\n return render(request=request, template_name= 'login.html', context={\"login_form\": form})\n\n\ndef userhome(request):\n return render(request,'userhome.html')\n\ndef view(request):\n global df\n df = pd.read_excel('crimeapp/20230320020226crime_data_extended_entries.xlsx')\n col = df.head(100).to_html\n return render(request, \"view.html\", {'table': col})\n\n\ndef moduless(request):\n global df,x_train, x_test, y_train, y_test\n df = pd.read_excel('crimeapp/20230320020226crime_data_extended_entries.xlsx')\n #Delete a unknown column\n df.drop(\"date\",axis=1,inplace=True)\n df.drop(\"time_of_day\",axis=1,inplace=True)\n df.drop(\"latitude\",axis=1,inplace=True)\n df.drop(\"longitude\",axis=1,inplace=True)\n le = LabelEncoder()\n col = df[['crime_type','location','victim_gender','perpetrator_gender','weapon','injury','weather','previous_activity']]\n for i in col:\n df[i]=le.fit_transform(df[i])\n x = df.drop(['crime_type'], axis = 1) \n y = df['crime_type']\n 
Oversample = RandomOverSampler(random_state=72)\n    x_sm, y_sm = Oversample.fit_resample(x[:100],y[:100])\n    x_train, x_test, y_train, y_test = train_test_split(x_sm, y_sm, test_size = 0.3, random_state= 72) \n    if request.method == \"POST\":\n        model = request.POST['algo']\n\n        if model == \"1\":\n            re = RandomForestClassifier(random_state=72)\n            re.fit(x_train,y_train)\n            re_pred = re.predict(x_test)\n            ac = accuracy_score(y_test,re_pred)\n            ac\n            msg='Accuracy of RandomForest : ' + str(ac)\n            return render(request,'moduless.html',{'msg':msg})\n        elif model == \"2\":\n            de = DecisionTreeClassifier()\n            de.fit(x_train,y_train)\n            de_pred = de.predict(x_test)\n            ac1 = accuracy_score(y_test,de_pred)\n            ac1\n            msg='Accuracy of Decision tree : ' + str(ac1)\n            return render(request,'moduless.html',{'msg':msg})\n        elif model == \"3\":\n            gd = GradientBoostingClassifier()\n            gd.fit(x_train,y_train)\n            gd_pred = gd.predict(x_test)\n            bc = accuracy_score(y_test,gd_pred)\n            bc\n            msg='Accuracy of GradientBoostingClassifier : ' + str(bc)\n            return render(request,'moduless.html',{'msg':msg})\n    return render(request,'moduless.html')\n\n\ndef prediction(request):\n    try:\n        global df,x_train, x_test, y_train, y_test\n\n        if request.method == 'POST':\n            a = float(request.POST['f1'])\n            # b = float(request.POST['f2'])\n            # c = float(request.POST['f3'])\n            d = float(request.POST['f4'])\n            e = float(request.POST['f5'])\n            f = float(request.POST['f6'])\n            g = float(request.POST['f7'])\n            h = float(request.POST['f8'])\n            i = float(request.POST['f9'])\n            j = float(request.POST['f10'])\n            k = float(request.POST['f11'])\n            l = float(request.POST['f12'])\n            \n            l = [[a,d,e,f,g,h,i,j,k,l]]\n            de = DecisionTreeClassifier()\n            de.fit(x_train,y_train)\n            pred = de.predict(l)\n            if pred == 0:\n                msg = 'Robbery'\n            elif pred == 1:\n                msg = 'Embezzlement'\n            elif pred == 2:\n                msg = 'Burglary'\n            elif pred == 3:\n                msg = 'Vandalism'\n            elif pred == 4:\n                msg = 'Theft'\n            elif pred == 5:\n                msg = 'Assault'\n            elif pred == 6:\n                msg = 'Forgery'\n            elif pred == 7:\n                msg ='Drug Offense'\n            else:\n                msg = 'Fraud'\n            \n            if a == 1:\n                lat = 12.9255\n                lag = 77.5468\n                name = \"Banashankari\"\n            if a == 2:\n                lat = 12.9304\n                lag = 77.6784\n                name = \"Bellandur\"\n            if a == 3:\n                lat = 12.8452 \n                lag = 77.6602\n                name = \"Electronic City\"\n            if a == 4:\n                lat = 12.9121 \n                lag = 77.6446\n                name = \"HSR layout\"\n            if a == 5:\n                lat = 12.9784\n                lag = 77.6408\n                name = \"Indiranagar\"\n            if a == 6:\n                lat = 12.9308\n                lag = 77.5838\n                name = \"jayanagar\"\n            if a == 7:\n                lat = 12.9063\n                lag = 77.5857\n                name = \"jp nagar\"\n            if a == 8:\n                lat = 12.9855\n                lag = 77.5269\n                name = \"Kamakshipalya\"\n            if a == 9:\n                lat = 12.9352\n                lag = 77.6245\n                name = \"Koramangala\"\n            if a == 10:\n                lat = 12.9569\n                lag = 77.7011\n                name = \"Marathahalli\"\n            if a == 11:\n                lat = 12.9698\n                lag = 77.7500\n                name = \"White Field\"\n            if a == 12:\n                lat = 13.1155\n                lag = 77.6070\n                name = \"White Field\"\n            \n            print(lat)\n            print(lag)\n            import folium\n            m = folium.Map(location=[19,-12],zoom_start=2)\n            folium.Marker([lat,lag],tooltip=name,popup=msg).add_to(m)\n            m = m._repr_html_()\n            print(msg)\n            return render(request,'result.html',{'msg':msg,'m':m})\n    except:\n        msg = \"Please give a required input\"\n        return render(request,'prediction.html',{'msg':msg})\n        \n\n    return render(request,'prediction.html')", "repo_name": "ayushilodha/Analysis-Prediction-of-Crime-Hotspots", "sub_path": "code/Frontend/crime/crimeapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7601, "program_lang": "python", "lang": "en", "doc_type": "code", 
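
The moduless view above repeats the same oversample/split/fit/score pattern for three models. Condensed onto synthetic stand-in data (hypothetical shapes; the sklearn and imblearn calls are the ones the record uses), the whole comparison reduces to:

import numpy as np
from imblearn.over_sampling import RandomOverSampler
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

rng = np.random.default_rng(72)
X = rng.random((100, 10))          # stand-in for the label-encoded feature matrix
y = rng.integers(0, 3, size=100)   # stand-in for the encoded crime_type target

X_sm, y_sm = RandomOverSampler(random_state=72).fit_resample(X, y)
x_tr, x_te, y_tr, y_te = train_test_split(X_sm, y_sm, test_size=0.3, random_state=72)

for clf in (RandomForestClassifier(random_state=72),
            DecisionTreeClassifier(),
            GradientBoostingClassifier()):
    acc = accuracy_score(y_te, clf.fit(x_tr, y_tr).predict(x_te))
    print(type(clf).__name__, round(acc, 3))
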
"stars": 2, "dataset": "github-code", "pt": "97", "api": [{"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "forms.NewUserForm", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 32, "usage_type": "name"}, {"api_name": "forms.NewUserForm", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 53, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 55, "usage_type": "name"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 78, "usage_type": "call"}, {"api_name": "imblearn.over_sampling.RandomOverSampler", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 94, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 136, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 210, "usage_type": "call"}, {"api_name": "folium.Marker", 
"line_number": 211, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 214, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 217, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 220, "usage_type": "call"}]} +{"seq_id": "371163457", "text": "\"\"\"Routes configuration\n\nThe more specific and detailed routes should be defined first so they\nmay take precedent over the more generic routes. For more information\nrefer to the routes manual at http://routes.groovie.org/docs/\n\"\"\"\nfrom routes import Mapper\n\ndef make_map(config):\n \"\"\"Create, configure and return the routes Mapper\"\"\"\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n map.explicit = False\n map.sub_domains = True\n map.sub_domains_ignore = \"www\"\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n map.connect('/', controller='index', action='index')\n \n # Web API\n # Wraps a safe subset of adroll.api for public consumption.\n map.connect('/api/v{version}/{module}/{function}/{id}', version=1, controller='api', action='dispatch')\n map.connect('/api/v{version}/{module}/{function}', version=1, controller='api', action='dispatch')\n map.connect('/api/v{version}/{module}', version=1, controller='api', action='dispatch')\n \n # Shortened webservice routes. Meant to be accessed from api.adroll.com per documentation.\n map.connect('/v1/{module}/{function}/{id}', version=1, controller='api', action='dispatch', sub_domain='api')\n map.connect('/v1/{module}/{function}', version=1, controller='api', action='dispatch', sub_domain='api')\n map.connect('/v1/{module}', version=1, controller='api', action='dispatch', sub_domain='api')\n\n map.connect('/admin', controller='admin/search', action='index')\n \n map.connect('/m/{action}', controller='mobile/index', action='index')\n map.connect('/m/', controller='mobile/index', action='index')\n map.connect('/m', controller='mobile/index', action='index')\n map.connect('/m/a/{action}', controller='mobile/auth', action='login')\n \n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}')\n\n return map\n", "repo_name": "Nullicopter/tethr", "sub_path": "tethr/config/routing.py", "file_name": "routing.py", "file_ext": "py", "file_size_in_byte": 2175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "97", "api": [{"api_name": "routes.Mapper", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "74089216308", "text": "\"\"\" illustrates use of findNeighbour4\n\nIllustrates the SNP distribution of all samples in the server.\ncommunicates directly with mongodb; does not use the REST API\n\"\"\"\n\nif __name__ == \"__main__\":\n import pandas as pd\n import gzip\n import json\n import progressbar\n\n from findn.common_utils import ConfigManager\n from findn.mongoStore import fn3persistence\n\n # instantiate storage class\n config_file = \"demos/covid/covid_config_v3.json\"\n cfm = ConfigManager(config_file)\n CONFIG = cfm.read_config()\n try:\n PERSIST = fn3persistence(\n dbname=CONFIG[\"SERVERNAME\"], connString=CONFIG[\"FNPERSISTENCE_CONNSTRING\"], debug=0\n ) # if in debug mode wipes all data. 
This is not what is wanted here, even if we are using the unit-testing database\n\n    except Exception:\n        raise\n\n    existing_guids = PERSIST.guids()\n    print(\"There are {0} existing guids\".format(len(existing_guids)))\n\n    inputfile = \"/data/software/fn4dev/testdata/pca/cog_metadata.csv.gz\"  # COG-UK data\n    # read metadata file into pandas\n    with gzip.open(inputfile, \"rt\") as f:\n        df = pd.read_csv(f)\n\n    # we are using the middle part of the cog_id as the sample_id; extract this.\n    sample_ids = df[\"sequence_name\"].to_list()\n    df[\"sample_id\"] = [x.split(\"/\")[1] for x in sample_ids]\n    # make sample_id the index\n    df.set_index(\"sample_id\", inplace=True)\n    regions = df[\"adm1\"].unique()\n    starting_counts = {}\n    for region in regions:\n        starting_counts[region] = {0: 0, 1: 0, 2: 0, 3: 0}\n    starting_counts[\"ALL\"] = {0: 0, 1: 0, 2: 0, 3: 0}\n\n    print(\"There are {0} samples in the COG-UK list\".format(len(df.index)))\n\n    both = set(df.index).intersection(existing_guids)\n    print(\"There are {0} samples in the server which are also annotated\".format(len(both)))\n    bar = progressbar.ProgressBar(max_value=len(both))\n    all_dists = dict()\n    for i, guid in enumerate(sorted(both)):\n        bar.update(i)\n        neighbours = PERSIST.guid2neighbours(guid, returned_format=1, cutoff=3)\n        all_dists[guid] = starting_counts.copy()\n\n        for (neighbouring_guid, distance) in neighbours[\"neighbours\"]:\n            if neighbouring_guid in both:\n                adm1 = df.at[neighbouring_guid, \"adm1\"]\n                all_dists[guid][adm1][distance] += 1\n                all_dists[guid][\"ALL\"][distance] += 1\n\n        # if i> 200:\n        #    break\n    outputfile = \"/data/data/pca/distribs.json\"\n    with open(outputfile, \"w\") as f:\n        json.dump(all_dists, f)\n    print(\"Complete. Wrote JSON to \", outputfile)\n", "repo_name": "davidhwyllie/findNeighbour4", "sub_path": "demo/demo_distances_to2.py", "file_name": "demo_distances_to2.py", "file_ext": "py", "file_size_in_byte": 2593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "findn.common_utils.ConfigManager", "line_number": 18, "usage_type": "call"}, {"api_name": "findn.mongoStore.fn3persistence", "line_number": 21, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 34, "usage_type": "call"}, {"api_name": "progressbar.ProgressBar", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "14907877618", "text": "# Problem 65\n\n# real	0m0.061s\n# user	0m0.041s\n# sys	0m0.012s\n\nfrom fractions import Fraction\nfrom utils import digit_sum\n\n# a helper for the lookup values of e\n# n > 1\ndef get_convergent_value(n):\n    if n % 3:\n        return 1\n    else:\n        return (n//3) * 2\n\ndef make_convergent_frac(n,rest_num,rest_denom):\n    if n == 1:\n        return (rest_num,rest_denom)\n    else:\n        v = get_convergent_value(n)\n        if rest_num and rest_denom:\n            # then factor in v\n            frac = Fraction(v*rest_denom + rest_num,rest_denom)\n            return make_convergent_frac(n-1,frac.denominator,frac.numerator)\n        else:\n            return make_convergent_frac(n-1,1,v)\n\ndef get_convergent_num(n):\n    (num,den) = make_convergent_frac(n,0,0)\n    frac = Fraction(2*den + num, den)\n    return(frac.numerator,frac.denominator,get_convergent_value(n))\n\nfor n in [100]:\n    (num,den,v) = get_convergent_num(n)\n    print(digit_sum(num))\n", "repo_name": "nathanandersen/ProjectEuler", "sub_path": "Solved/p065.py", "file_name": "p065.py", "file_ext": "py", 
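
The p065.py record above rebuilds each convergent of e with Fraction arithmetic. Since only the numerator is needed, the standard recurrence h_k = a_k * h_{k-1} + h_{k-2} over the continued-fraction terms gives the same digit sum with plain integers; a sketch of that alternative:

def e_term(k):
    # e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: a_0 = 2, then every third term is 2, 4, 6, ...
    if k == 0:
        return 2
    return 2 * (k // 3 + 1) if k % 3 == 2 else 1

def numerator_of_convergent(n):
    h_prev, h = 0, 1  # h_{-2} and h_{-1} seed the recurrence
    for k in range(n):
        h_prev, h = h, e_term(k) * h + h_prev
    return h

print(sum(int(d) for d in str(numerator_of_convergent(100))))  # 272, matching digit_sum above
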
"file_size_in_byte": 945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "fractions.Fraction", "line_number": 25, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.digit_sum", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "29749207589", "text": "from typing import Optional\n\nimport tensorflow as tf\n\nfrom ..inducing_variables import InducingVariables\nfrom ..kernels import Kernel\nfrom ..base import MeanAndVariance\nfrom ..posteriors import IndependentPosteriorSingleOutput, IndependentPosteriorMultiOutput\n\ndef conditional_GP(\n Xnew: tf.Tensor,\n inducing_variable: InducingVariables,\n kernel: Kernel,\n f: tf.Tensor,\n *,\n full_cov: bool = False,\n full_output_cov: bool = False,\n q_sqrt: Optional[tf.Tensor] = None,\n white: bool = False,\n) -> MeanAndVariance:\n \"\"\"\n Single-output GP conditional.\n\n The covariance matrices used to calculate the conditional have the following shape:\n - Kuu: [M, M]\n - Kuf: [M, N]\n - Kff: [N, N]\n\n Further reference\n -----------------\n - See `gpflow.conditionals._dense_conditional` (below) for a detailed explanation of\n conditional in the single-output case.\n - See the multiouput notebook for more information about the multiouput framework.\n\n Parameters\n ----------\n :param Xnew: data matrix, size [N, D].\n :param f: data matrix, [M, R]\n :param full_cov: return the covariance between the datapoints\n :param full_output_cov: return the covariance between the outputs.\n NOTE: as we are using a single-output kernel with repetitions\n these covariances will be zero.\n :param q_sqrt: matrix of standard-deviations or Cholesky matrices,\n size [M, R] or [R, M, M].\n :param white: boolean of whether to use the whitened representation\n :return:\n - mean: [N, R]\n - variance: [N, R], [R, N, N], [N, R, R] or [N, R, N, R]\n Please see `gpflow.conditional._expand_independent_outputs` for more information\n about the shape of the variance, depending on `full_cov` and `full_output_cov`.\n \"\"\"\n \n posterior = IndependentPosteriorMultiOutput(\n kernel,\n inducing_variable,\n f,\n q_sqrt,\n whiten=white,\n mean_function=None,\n )\n\n return posterior.fused_predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)\n", "repo_name": "SebastianPopescu/GP_package", "sub_path": "gp_package/conditionals/conditionals.py", "file_name": "conditionals.py", "file_ext": "py", "file_size_in_byte": 2105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "tensorflow.Tensor", "line_number": 11, "usage_type": "attribute"}, {"api_name": "inducing_variables.InducingVariables", "line_number": 12, "usage_type": "name"}, {"api_name": "kernels.Kernel", "line_number": 13, "usage_type": "name"}, {"api_name": "tensorflow.Tensor", "line_number": 14, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "tensorflow.Tensor", "line_number": 18, "usage_type": "attribute"}, {"api_name": "posteriors.IndependentPosteriorMultiOutput", "line_number": 53, "usage_type": "call"}, {"api_name": "base.MeanAndVariance", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "25097326029", "text": "import os\nimport datetime as date\n\nimport constants as con\n\ndef make_entry(title: str) -> None:\n \"\"\"\n Creates an entry to the journal.\n\n Parameters:\n title (str): A title for 
the journal entry.\n\n Raises:\n RuntimeError: If the title is not provided, invalid, or\n already-existing.\n \"\"\"\n\n if title is None:\n raise RuntimeError(\"Title cannot be empty.\")\n \n title = \"\".join([c for c in title if c.isalnum() or c in ['.', '-', '_']])\n\n if not title:\n raise RuntimeError(\"Filtered title cannot be empty.\")\n\n if os.path.exists(os.path.join(con.DATA_FOLDER_PATH, title)):\n raise RuntimeError(\"Entry already exists.\")\n\n with open(os.path.join(con.DATA_FOLDER_PATH, title), \"w\") as file:\n file.write(date.date.today().strftime(format=\"%d/%m/%Y, %H:%M:%S\"))\n\ndef delete_entry(path: os.PathLike) -> None:\n \"\"\"\n Deletes a given entry.\n\n Parameters:\n path (os.PathLike): A path to the entry to delete.\n\n Raises:\n RuntimeError: If the entry at the given path does not exist.\n \"\"\"\n\n if os.path.exists(path):\n os.remove(path)\n else:\n raise RuntimeError(\"Entry does not exist.\")", "repo_name": "alex-wang-13/MyJournal", "sub_path": "main/io.py", "file_name": "io.py", "file_ext": "py", "file_size_in_byte": 1195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "constants.DATA_FOLDER_PATH", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "constants.DATA_FOLDER_PATH", "line_number": 29, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.PathLike", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "29752642719", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef sigmoid(x):\n return (1/(1+np.exp(-x)))\n\ndef mse (y, y_hat):\n o = np.linalg.norm(y-y_hat)**2\n return o\n\ndef predict(X, b1, b2):\n Z1=sigmoid(X*b1)\n Z1_=np.hstack((Z1, np.ones((X.shape[0],1))))\n y_hat = sigmoid(Z1_*b2) \n return y_hat\n\ndef grad(x, y, b1, b2):\n\n #feedforward\n u1 = x*b1\n z1=sigmoid(u1)\n z1_=np.hstack((z1, np.ones((x.shape[0],1))))\n u2= z1_*b2\n z2 = sigmoid(u2) \n\n\n #back propagation\n error = -2*(y-z2)\n g2 = np.multiply(np.multiply(z2,(1-z2)),error)\n g1 = np.multiply(g2*b2[:-1,:].T, np.multiply(z1, (1-z1)))\n\n #gradient of the loss\n grad1 = x.T*g1\n grad2 = z1_.T*g2\n\n return grad1, grad2\n\n\ndef nester(x, y, b1, b2, alpha, v1, v2, epsilon):\n \n \"\"\"Implementation of Nesterov Momentum\"\"\"\n \n b1_tilde = b1 + alpha*v1\n b2_tilde = b2 + alpha*v2\n \n grad1, grad2 = grad(x,y, b1_tilde, b2_tilde)\n \n v1 = alpha*v1 - epsilon*grad1\n v2 = alpha*v2 - epsilon*grad2\n \n b1 = b1 + v1\n b2 = b2 + v2\n \n return b1, b2, v1, v2\n\ndef adam(x, y, b1, b2, phi1, phi2, epsilon, delta, t, s, r):\n \n \"\"\"Implementation of adam optimizer \"\"\"\n \n grad_dict = {}\n grad_dict[\"1\"], grad_dict[\"2\"] = grad(x,y, b1, b2)\n \n s_hat = {}\n r_hat = {}\n \n for i in list(grad_dict.keys()):\n s[i] = phi1*s[i] + (1-phi1)*grad_dict[i]\n 
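
For orientation while reading the adam() helper above (the record's own code resumes below), the bias-corrected update it implements can be collapsed onto a single parameter array; a sketch with hypothetical inputs:

import numpy as np

def adam_step(theta, g, s, r, t, epsilon=0.001, phi1=0.9, phi2=0.999, delta=1e-8):
    # Exponential moving averages of the gradient and its elementwise square ...
    s = phi1 * s + (1 - phi1) * g
    r = phi2 * r + (1 - phi2) * g * g
    # ... bias-corrected for the zero initialisation (t counts updates from 1).
    s_hat = s / (1 - phi1 ** t)
    r_hat = r / (1 - phi2 ** t)
    theta = theta - epsilon * s_hat / (delta + np.sqrt(r_hat))
    return theta, s, r
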
s_hat[i] = s[i]/(1-phi1**t)\n \n r[i]= phi2*r[i] + (1-phi2)*np.multiply(grad_dict[i], grad_dict[i])\n r_hat[i] = r[i]/(1-phi2**t)\n \n b1 = b1-epsilon*(np.multiply(s_hat[\"1\"], 1/(delta+np.sqrt(r_hat[\"1\"]))))\n b2 = b2-epsilon*(np.multiply(s_hat[\"2\"], 1/(delta+np.sqrt(r_hat[\"2\"]))))\n \n\n return b1, b2, s, r\n\n\nX = np.matrix([[1, -1, -1],\n [1, -1,1],\n [1, 1,-1],\n [1, 1,1]])\n\nY= np.matrix([[0],[1],[1],[0]])\n\nb1 = np.matrix(np.random.rand(3,2))\nb2 = np.matrix(np.random.rand(3,1))\n\nb1_nester = b1\nb2_nester = b2\n\nv1 = np.matrix(np.zeros((3,1)))\nv2 = np.matrix(np.zeros((3,1)))\n\nb1_adam = b1\nb2_adam = b2\n\ns = {\"1\": np.zeros(b1.shape),\n \"2\": np.zeros(b2.shape)}\n\nr = {\"1\": np.zeros(b1.shape),\n \"2\": np.zeros(b2.shape)}\n\n\n#parameters\nmse_list = []\nepsilon = 0.001\nalpha = 0.5\ndelta = 1e-8\nt = 0\nphi1 = 0.9\nphi2 = 0.999\n\nfor i in range(1000):\n\n for j in range(X.shape[0]):\n \n t += 1\n \n #choosing stochastic sample\n idx = np.random.choice(X.shape[0],1)\n x = X[idx,]\n y = Y[idx]\n\n #gradient\n grad1, grad2 = grad(x,y, b1, b2)\n \n #updating parameters by SGD\n b1 = b1 - epsilon*grad1\n b2 = b2 - epsilon*grad2\n\n #updating parameters using nesterov momentum\n b1_nester, b2_nester, v1,v2 = nester(x, y, b1_nester, b2_nester, alpha, v1, v2, epsilon)\n \n #updating parameters using adam optimizer\n b1_adam, b2_adam, s, r = adam(x, y, b1_adam, b2_adam, phi1, phi2, epsilon, delta, t, s, r)\n \n #making predictions over all the training data to calculate MSE\n y_hat = predict(X, b1, b2)\n y_hat_nester = predict(X, b1_nester, b2_nester)\n y_hat_adam = predict(X, b1_adam, b2_adam)\n \n #measuring the error\n mse_SGD = mse(Y, y_hat)\n mse_nester = mse(Y, y_hat_nester)\n mse_adam = mse(Y, y_hat_adam)\n \n mse_list.append([mse_SGD, mse_nester, mse_adam])\n\nmse_list = np.array(mse_list)\nplt.plot(mse_list[:,0])\nplt.plot(mse_list[:,1])\nplt.plot(mse_list[:,2])\n\nplt.ylabel(\"MSE\")\nplt.xlabel(\"iteration\")\nplt.title(\"Convergence\")\nplt.legend((\"SGD\", \"Nesterov\", \"Adam\"))\nplt.title(\"Convergence with learning rate=\"+str(epsilon))\nplt.grid()\nplt.show()\n\n", "repo_name": "sebastianpinedaar/machine-learning-codes", "sub_path": "backprop-optimizers/MLP_adam.py", "file_name": "MLP_adam.py", "file_ext": "py", "file_size_in_byte": 3881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "97", "api": [{"api_name": "numpy.exp", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 
85, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.matrix", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.matrix", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "9455707525", "text": "__author__ = 'yufangxian'\n\n\nfrom bs4 import BeautifulSoup\nfrom urllib import request\nimport numpy as np\nimport pandas as pd\nimport os\nfrom multiprocessing import Pool, Process\nimport time\n\n\n\nclass Dota2_crawler(object):\n def __init__(self):\n self._base_url = r\"http://www.gosugamers.net/dota2/gosubet?r-page=\"\n\n def crawler_page(self):\n start_time = time.time()\n for i in range(1, 50):\n url = self._base_url + str(i)\n self.do_crawler(url)\n print(\"time consume: %s\" % (time.time() - start_time))\n\n def do_crawler(self, url):\n parsed_url = request.urlopen(url)\n soup = BeautifulSoup(parsed_url, 'lxml')\n soup_lose = 
soup.findAll('span', class_=\"score losser hscore\")\n        soup_win = soup.findAll('span', class_=\"score winner ascore\")\n        soup_team1 = soup.findAll('span', class_=\"opp opp1\")\n        soup_team2 = soup.findAll('span', class_=\"opp opp2\")\n        soup_result = soup.findAll('span', class_=\"hidden\")\n        path = '/users/yufangxian/Documents/dota_result'\n        '''\n        p1 = Process(target=self.save_team1, args=(path, soup_team1,))\n        p1.start()\n        p2 = Process(target=self.save_team2, args=(path, soup_team2,))\n        p2.start()\n        p3 = Process(target=self.save_res, args=(path, soup_result,))\n        p3.start()\n        p1.join()\n        p2.join()\n        p3.join()\n        '''\n\n        self.save_team1(path, soup_team1)\n        self.save_team2(path, soup_team2)\n        self.save_res(path, soup_result)\n        #print(\"all processes have done!\")\n        #self.save_res(path, soup_lose, soup_win, soup_team1, soup_team2, soup_result)\n\n    @staticmethod\n    def save_team1(path, soup_team1):\n        saving_team1 = path + '/dota2_team1.txt'\n        for item in soup_team1[-25:]:\n            s = str(item.get_text())\n            with open(saving_team1, 'a') as fp2:\n                fp2.write(s[:-2]+'\\n')\n\n    @staticmethod\n    def save_team2(path, soup_team2):\n        saving_team2 = path + '/dota2_team2.txt'\n        for item in soup_team2[-25:]:\n            with open(saving_team2, 'a') as fp3:\n                fp3.write(str(item.get_text())+'\\n')\n\n    @staticmethod\n    def save_res(path, soup_result):\n        #saving_loss_path = path + '/dota2_loss.txt'\n        #saving_win_path = path +'/dota2_win.txt'\n        #saving_team1 = path + '/dota2_team1.txt'\n        #saving_team2 = path + '/dota2_team2.txt'\n        saving_result = path + '/dota2_result.txt'\n        '''\n        for item in soup_lose:\n            with open(saving_loss_path, 'a') as fp:\n                fp.write(str(item.get_text())+'\\n')\n\n        for item in soup_win:\n            with open(saving_win_path, 'a') as fp1:\n                fp1.write(str(item.get_text())+'\\n')\n        '''\n\n        for item in soup_result:\n            with open(saving_result, 'a') as fp4:\n                fp4.write(str(item.get_text())+'\\n')\n\n\ndef merge_data():\n    path = '/users/yufangxian/Documents/dota_result/'\n    team1 = pd.read_table(path + 'dota2_team1.txt', header=None)\n    team2 = pd.read_table(path + 'dota2_team2.txt', header=None)\n    result = pd.read_table(path + 'dota2_result.txt', header=None)\n    live_up = len(team1) -len(result) #calculate the upcoming and live matches\n    add_0 = np.zeros(live_up)\n    df = pd.DataFrame(add_0)\n    final_result = df.append(result, ignore_index=True)\n    final_table = pd.concat([team1, final_result], axis=1, join_axes=[team1.index])\n    final_table = pd.concat([final_table, team2], axis=1, join_axes=[final_table.index])\n    final_table.to_csv(path+'final_result.csv')\n\n\ndef mining_data():\n    pass\n\n\n\n\n\ndef main():\n    dota2 = Dota2_crawler()\n    dota2.crawler_page()\n    merge_data()\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "fangxian/python", "sub_path": "crawler/dota2_crawler.py", "file_name": "dota2_crawler.py", "file_ext": "py", "file_size_in_byte": 3757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 26, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 
93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "41521209800", "text": "\"\"\"common.py: common tools\"\"\"\n__author__ = \"Yang Liu\"\n__email__ = \"lander14@outlook.com\"\n\nimport glob\nimport os\nimport shutil\nimport pycocotools.mask as rletools\nimport tqdm\n\n\n# Template function\ndef list_files_in_dir(root_dir: str, file_type=\".jpg\"):\n # List all the folders under the root_dir\n # Only the folder names that doesn't contain the full path\n folders = [fn.split('/')[-1] for fn in sorted(glob.glob(os.path.join(root_dir, '*')))]\n\n for idx, folder in enumerate(tqdm.tqdm(folders)):\n fpath = os.path.join(root_dir, folder)\n # List all files in the current folder\n files = sorted(glob.glob(fpath + '/*' + file_type))\n # =================================\n # Do the rest stuffs from here\n # =================================\n\n\n# Delete all txt file in the given folder\ndef delete_files(target_dir: str, suffix='.txt'):\n \"\"\"\n Delete existing files of certain format\n \"\"\"\n if os.path.exists(target_dir):\n all_files = os.listdir(target_dir)\n if len(all_files):\n print(target_dir, \"is not empty, will delete existing txt files.\")\n input(\"Press Enter to confirm DELETE and continue ...\")\n for item in all_files:\n if item.endswith(suffix):\n os.remove(os.path.join(target_dir, item))\n\n\ndef train_val_split(image_dir: str, split_ratio=0.9):\n \"\"\"\n Split the images in a folder to train and validation set.\n Specifically designed for ImageNet-like folder structures:\n\n - image_dir\n - train\n - classA\n - classB\n - image1.jpg\n - image2.jpg\n\n :param image_dir: the root directory of different classes of images.\n :param split_ratio: train_set = num_images * ratio, val_set = num_images * (1-ratio)\n \"\"\"\n folders = [fn.split('/')[-1] for fn in sorted(glob.glob(os.path.join(image_dir, 'train', '*')))]\n\n val_dir = image_dir + \"/val\"\n if not os.path.exists(val_dir):\n os.makedirs(val_dir)\n\n for idx, folder in enumerate(tqdm.tqdm(folders)):\n fpath = os.path.join(image_dir, 'train', folder)\n # List all files in the current folder\n files = glob.glob(fpath + '/*' + '.jpg')\n num_train = int(len(files) * split_ratio)\n num_val = len(files) - num_train\n\n # move\n val_folder = val_dir + '/' + folder\n if not os.path.exists(val_folder):\n os.makedirs(val_folder)\n\n for i in range(num_train, len(files)):\n src = files[i]\n fname = src.split('/')[-1]\n dest = os.path.join(val_dir, folder, fname)\n shutil.move(src, dest)\n\n\n# =========================================\n# Code from mots-tools\n# https://github.com/VisualComputingInstitute/mots_tools/blob/master/mots_common/io.py\n# ========================================\nclass SegmentedObject:\n def __init__(self, mask, class_id, track_id):\n self.mask = mask\n self.class_id = class_id\n self.track_id = track_id\n\n\ndef load_sequences(path, seqmap):\n objects_per_frame_per_sequence = {}\n for seq in seqmap:\n print(\"Loading sequence\", seq)\n seq_path_folder = os.path.join(path, seq)\n seq_path_txt = os.path.join(path, seq + \".txt\")\n if os.path.isdir(seq_path_folder):\n pass\n # objects_per_frame_per_sequence[seq] = load_images_for_folder(seq_path_folder)\n elif os.path.exists(seq_path_txt):\n objects_per_frame_per_sequence[seq] = 
load_txt(seq_path_txt)\n else:\n assert False, \"Can't find data in directory \" + path\n\n return objects_per_frame_per_sequence\n\n\ndef load_txt(path):\n objects_per_frame = {}\n track_ids_per_frame = {} # To check that no frame contains two objects with same id\n combined_mask_per_frame = {} # To check that no frame contains overlapping masks\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip()\n fields = line.split(\" \")\n\n frame = int(fields[0])\n if frame not in objects_per_frame:\n objects_per_frame[frame] = []\n if frame not in track_ids_per_frame:\n track_ids_per_frame[frame] = set()\n if int(fields[1]) in track_ids_per_frame[frame]:\n assert False, \"Multiple objects with track id \" + fields[1] + \" in frame \" + fields[0]\n else:\n track_ids_per_frame[frame].add(int(fields[1]))\n\n class_id = int(fields[2])\n if not (class_id == 1 or class_id == 2 or class_id == 10):\n assert False, \"Unknown object class \" + fields[2]\n\n mask = {'size': [int(fields[3]), int(fields[4])], 'counts': fields[5].encode(encoding='UTF-8')}\n if frame not in combined_mask_per_frame:\n combined_mask_per_frame[frame] = mask\n elif rletools.area(rletools.merge([combined_mask_per_frame[frame], mask], intersect=True)) > 0.0:\n assert False, \"Objects with overlapping masks in frame \" + fields[0]\n else:\n combined_mask_per_frame[frame] = rletools.merge([combined_mask_per_frame[frame], mask], intersect=False)\n objects_per_frame[frame].append(SegmentedObject(\n mask,\n class_id,\n int(fields[1])\n ))\n\n return objects_per_frame\n\n\ndef load_seqmap(seqmap_filename):\n print(\"Loading seqmap...\")\n seqmap = []\n max_frames = {}\n with open(seqmap_filename, \"r\") as fh:\n for i, l in enumerate(fh):\n fields = l.split(\" \")\n seq = \"%04d\" % int(fields[0])\n seqmap.append(seq)\n max_frames[seq] = int(fields[3])\n return seqmap, max_frames\n# ======================================================\n\n\nif __name__ == \"__main__\":\n image_dir = \"/Users/lander14/Desktop/ImageNet/\"\n train_val_split(image_dir)\n", "repo_name": "YangLiu14/AI-LearningBook", "sub_path": "tools/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 5930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "glob.glob", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, 
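
load_txt above enforces non-overlapping masks by merging RLEs with intersect=True and checking the resulting area. A minimal round trip with pycocotools on two toy 4x4 masks shows that check in isolation:

import numpy as np
import pycocotools.mask as rletools

a = np.zeros((4, 4), dtype=np.uint8); a[:, :2] = 1   # left two columns
b = np.zeros((4, 4), dtype=np.uint8); b[:, 1:] = 1   # right three columns
rle_a = rletools.encode(np.asfortranarray(a))        # encode expects Fortran-order uint8
rle_b = rletools.encode(np.asfortranarray(b))

overlap = rletools.area(rletools.merge([rle_a, rle_b], intersect=True))
print(overlap)  # 4 pixels shared, so load_txt above would reject such a frame
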
"usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 61, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pycocotools.mask.area", "line_number": 136, "usage_type": "call"}, {"api_name": "pycocotools.mask", "line_number": 136, "usage_type": "name"}, {"api_name": "pycocotools.mask.merge", "line_number": 136, "usage_type": "call"}, {"api_name": "pycocotools.mask.merge", "line_number": 139, "usage_type": "call"}, {"api_name": "pycocotools.mask", "line_number": 139, "usage_type": "name"}]} +{"seq_id": "28865806164", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 25 16:07:16 2020\r\n\r\n@author: Shaila Sarker\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nsalesData = pd.read_csv(\"D:/DS/1. 
Sales Analysis - 1st moment/SalesData.csv\", encoding= 'unicode_escape') #unicodeDecodeErrVanished\r\n\r\n#Exploratory Data Analysis\r\n#Measures of Central Tendency / First moment business decision\r\nsalesData.SALES.mean() \r\nsalesData.SALES.median()\r\n#salesData.SALES.mode() #used for categorical data\r\n\r\n# Measures of Dispersion / Second moment business decision\r\nsalesData.SALES.var() # variance\r\nsalesData.SALES.std()#standard deviation\r\nrange = max(salesData.SALES) - min(salesData.SALES) # range\r\nrange\r\n\r\n#Graphical Representation\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nplt.bar(height = salesData.SALES, x = np.arange(1,2824,1)) # initializing the parameter [meaningless >> showing the dataset as it is]\r\n\r\nplt.hist(salesData.SALES) #histogram or frequency plot [x-axis >> salesAmount, y-axis >> frequency]\r\n\r\nplt.boxplot(salesData.SALES) #boxplot [finds out the outliers]\r\n", "repo_name": "ssr17bd/1st-Moment-Business-Qs", "sub_path": "1stMomentBusinessQues.py", "file_name": "1stMomentBusinessQues.py", "file_ext": "py", "file_size_in_byte": 1045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.boxplot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "29793178331", "text": "# @Time : 2021/5/25 17:31\n# @Author : lucas\n# @File : utils.py\n# @Project : SSO\n# @Software: PyCharm\nimport uuid\nfrom urllib.parse import urlencode\n\n\ndef get_real_state():\n return uuid.uuid4().hex[:9]\n\n\ndef build_url(base_url: str, params: dict, *path) -> str:\n url = base_url\n if path:\n for i in path:\n url = '{}/{}'.format(url, i)\n if params:\n url = '{}?{}'.format(url, urlencode(params))\n return url\n\n\nif __name__ == \"__main__\":\n print(get_real_state())\n print(build_url(\"http://www.baidu.com\", {\"q\": \"selenium\"}))\n print(build_url(\"http://www.baidu.com\", {}, \"新闻\"))\n", "repo_name": "lucas234/OauthDemo", "sub_path": "utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "uuid.uuid4", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "25084639994", "text": "import pdb\nimport time\nimport config as CONFIG\nimport logging\nimport datetime\nimport time\nimport numpy as np\nimport ExchangeInterface\nfrom threading import Timer\nimport sklearn\nfrom sklearn import linear_model\nimport talib\nimport os\n\n\nclass RSI_calculator(object):\n def __init__(self):\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n timestr = time.strftime(\"%Y-%m-%d.%H%M%S\")\n handler = logging.FileHandler(filename=CONFIG.STD_PATH + 'logs/RSI_calculator-' + timestr + '.log', mode='w')\n if CONFIG.SIMULATION_MODE:\n handler = 
logging.FileHandler(filename=CONFIG.STD_PATH + 'logs/Simulation/RSI_calculator-' + timestr + '.log', mode='w')\n\n        handler.setFormatter(formatter)\n        self.LOG = logging.getLogger(\"RSI Calculator\")\n        self.LOG.setLevel(logging.INFO)\n        self.LOG.addHandler(handler)\n        self.tick = CONFIG.tick\n        return\n\n    def rsi_calculate(self,period):\n\n        if len(self.tick) <= period:\n            self.LOG.info(\"Required %d candles but available %d\", period+1, len(self.tick))\n\n        else:\n            close_prices = []\n            cur_time = datetime.datetime.now().replace(microsecond=0)\n            cur_min = cur_time.minute - (cur_time.minute % CONFIG.time_interval)\n            cur_mkt_idx = cur_time.replace(minute=cur_min, second=0, microsecond=0)\n            cur_idx = str(cur_mkt_idx)\n            final_idx = str(cur_mkt_idx)\n            for i in range(1, len(self.tick)+1):\n                while (cur_idx not in self.tick):\n                    pass\n                close_prices.append(self.tick[cur_idx][3])\n                cur_mkt_idx = cur_mkt_idx - datetime.timedelta(minutes=CONFIG.time_interval)\n                cur_idx = str(cur_mkt_idx)\n            close_prices.reverse()\n            a = np.asarray(close_prices)\n            result = talib.RSI(a, timeperiod=period)\n            CONFIG.RSI[final_idx] = (float(format(result[-1], '.2f'))) \n            self.LOG.info(\"RSI value for (%d period) at %s is %f\", period , final_idx, CONFIG.RSI[final_idx])\n\n        return\n\n\ndef main():\n    while (True):\n        tick_timestamp = datetime.datetime.now().replace(microsecond=0)\n        if (tick_timestamp.minute % CONFIG.time_interval == 0):\n            break\n\n    obj = RSI_calculator()\n\n    if CONFIG.trading_exchange == \"NSE\" or CONFIG.trading_exchange == \"NFO\":\n        market_end_time = datetime.datetime.now().replace(hour=CONFIG.CLOSE_HR, minute=CONFIG.CLOSE_MIN, second=0,\n                                                          microsecond=0)\n    elif CONFIG.trading_exchange == \"MCX\":\n        market_end_time = datetime.datetime.now().replace(hour=CONFIG.CLOSE_HR_COMMODITY,\n                                                          minute=CONFIG.CLOSE_MIN_COMMODITY, second=0,\n                                                          microsecond=0)\n    while True:\n        Timer(CONFIG.TIMER_STD_VAL, obj.rsi_calculate,[CONFIG.RSI_PERIOD]).run()\n\n        if CONFIG.trading_exchange == \"NSE\" or CONFIG.trading_exchange == \"NFO\":\n            if (datetime.datetime.now().replace(microsecond=0) >= market_end_time and not CONFIG.SIMULATION_MODE):\n                print(\"Exiting RSI thread as NSE/NFO market has closed now \")\n                return\n\n        elif CONFIG.trading_exchange == \"MCX\":\n            if (datetime.datetime.now().replace(microsecond=0) >= market_end_time and not CONFIG.SIMULATION_MODE):\n                print(\"Exiting RSI thread as MCX market has closed now \")\n                return\n\n    return", "repo_name": "deepakmega/multistock", "sub_path": "RSI.py", "file_name": "RSI.py", "file_ext": "py", "file_size_in_byte": 3563, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "logging.Formatter", "line_number": 18, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 20, "usage_type": "call"}, {"api_name": "config.STD_PATH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.SIMULATION_MODE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "config.STD_PATH", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.tick", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 38, "usage_type": "attribute"}, {"api_name": "config.time_interval", "line_number": 39, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 47, "usage_type": "call"}, {"api_name": "config.time_interval", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 50, "usage_type": "call"}, {"api_name": "talib.RSI", "line_number": 51, "usage_type": "call"}, {"api_name": "config.RSI", "line_number": 52, "usage_type": "attribute"}, {"api_name": "config.RSI", "line_number": 53, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "config.time_interval", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.trading_exchange", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "attribute"}, {"api_name": "config.CLOSE_HR", "line_number": 67, "usage_type": "attribute"}, {"api_name": "config.CLOSE_MIN", "line_number": 67, "usage_type": "attribute"}, {"api_name": "config.trading_exchange", "line_number": 69, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "attribute"}, {"api_name": "config.CLOSE_HR_COMMODITY", "line_number": 70, "usage_type": "attribute"}, {"api_name": "config.CLOSE_MIN_COMMODITY", "line_number": 71, "usage_type": "attribute"}, {"api_name": "threading.Timer", "line_number": 74, "usage_type": "call"}, {"api_name": "config.TIMER_STD_VAL", "line_number": 74, "usage_type": "attribute"}, {"api_name": "config.RSI_PERIOD", "line_number": 74, "usage_type": "attribute"}, {"api_name": "config.trading_exchange", "line_number": 76, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "config.SIMULATION_MODE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "config.trading_exchange", "line_number": 81, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "attribute"}, {"api_name": "config.SIMULATION_MODE", "line_number": 82, "usage_type": "attribute"}]} +{"seq_id": "9475414604", "text": "import os\nimport firebase_admin\nfrom firebase_admin import credentials, auth, storage\n\n\ncred = credentials.Certificate(os.path.abspath('./serviceAccount.json'))\nfirebase_admin.initialize_app(cred)\nbucket = storage.bucket('nutricheck-app.appspot.com')\n\n\ndef upload_image_to_firebase(filename, file_path):\n blob = bucket.blob('images/'+filename)\n blob.upload_from_filename(filename=file_path)\n\n\ndef verifyGoogleAccessToken(token):\n return auth.verify_id_token(token)\n", "repo_name": "partheev/nutrition-assistant-webapp", "sub_path": "backend/src/config/firebase.py", "file_name": "firebase.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "97", "api": [{"api_name": "firebase_admin.credentials.Certificate", "line_number": 6, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 6, 
"usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "firebase_admin.initialize_app", "line_number": 7, "usage_type": "call"}, {"api_name": "firebase_admin.storage.bucket", "line_number": 8, "usage_type": "call"}, {"api_name": "firebase_admin.storage", "line_number": 8, "usage_type": "name"}, {"api_name": "firebase_admin.auth.verify_id_token", "line_number": 17, "usage_type": "call"}, {"api_name": "firebase_admin.auth", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "7021895240", "text": "import asyncio\nfrom collections.abc import Iterable\nfrom functools import partial\n\nimport zmq\n\nfrom aiozmq import create_zmq_connection\n\nfrom .base import (\n NotFoundError,\n ParametersError,\n Service,\n ServiceClosedError,\n _BaseProtocol,\n _BaseServerProtocol,\n)\nfrom .log import logger\n\n\nasync def connect_pubsub(*, connect=None, bind=None, loop=None, translation_table=None):\n \"\"\"A coroutine that creates and connects/binds pubsub client.\n\n Usually for this function you need to use connect parameter, but\n ZeroMQ does not forbid to use bind.\n\n translation_table -- an optional table for custom value translators.\n\n loop -- an optional parameter to point ZmqEventLoop. If loop is\n None then default event loop will be given by\n asyncio.get_event_loop() call.\n\n Returns PubSubClient instance.\n\n \"\"\"\n if loop is None:\n loop = asyncio.get_event_loop()\n\n transp, proto = await create_zmq_connection(\n lambda: _ClientProtocol(loop, translation_table=translation_table),\n zmq.PUB,\n connect=connect,\n bind=bind,\n loop=loop,\n )\n return PubSubClient(loop, proto)\n\n\nasync def serve_pubsub(\n handler,\n *,\n subscribe=None,\n connect=None,\n bind=None,\n loop=None,\n translation_table=None,\n log_exceptions=False,\n exclude_log_exceptions=(),\n timeout=None\n):\n \"\"\"A coroutine that creates and connects/binds pubsub server instance.\n\n Usually for this function you need to use *bind* parameter, but\n ZeroMQ does not forbid to use *connect*.\n\n handler -- an object which processes incoming pipeline calls.\n Usually you like to pass AttrHandler instance.\n\n log_exceptions -- log exceptions from remote calls if True.\n\n subscribe -- subscription specification. Subscribe server to\n topics. Allowed parameters are str, bytes, iterable\n of str or bytes.\n\n translation_table -- an optional table for custom value translators.\n\n exclude_log_exceptions -- sequence of exception classes than should not\n be logged.\n\n timeout -- timeout for performing handling of async server calls.\n\n loop -- an optional parameter to point ZmqEventLoop. 
If loop is\n            None then default event loop will be given by\n            asyncio.get_event_loop() call.\n\n    Returns PubSubService instance.\n    Raises OSError on system error.\n    Raises TypeError if arguments have inappropriate type.\n\n    \"\"\"\n    if loop is None:\n        loop = asyncio.get_event_loop()\n\n    transp, proto = await create_zmq_connection(\n        lambda: _ServerProtocol(\n            loop,\n            handler,\n            translation_table=translation_table,\n            log_exceptions=log_exceptions,\n            exclude_log_exceptions=exclude_log_exceptions,\n            timeout=timeout,\n        ),\n        zmq.SUB,\n        connect=connect,\n        bind=bind,\n        loop=loop,\n    )\n    serv = PubSubService(loop, proto)\n    if subscribe is not None:\n        if isinstance(subscribe, (str, bytes)):\n            subscribe = [subscribe]\n        else:\n            if not isinstance(subscribe, Iterable):\n                raise TypeError(\"subscribe should be str, bytes or iterable\")\n        for topic in subscribe:\n            serv.subscribe(topic)\n    return serv\n\n\nclass _ClientProtocol(_BaseProtocol):\n    def call(self, topic, name, args, kwargs):\n        if self.transport is None:\n            raise ServiceClosedError()\n        if topic is None:\n            btopic = b\"\"\n        elif isinstance(topic, str):\n            btopic = topic.encode(\"utf-8\")\n        elif isinstance(topic, bytes):\n            btopic = topic\n        else:\n            raise TypeError(\n                \"topic argument should be None, str or bytes \" \"({!r})\".format(topic)\n            )\n        bname = name.encode(\"utf-8\")\n        bargs = self.packer.packb(args)\n        bkwargs = self.packer.packb(kwargs)\n        self.transport.write([btopic, bname, bargs, bkwargs])\n        fut = asyncio.Future()\n        fut.set_result(None)\n        return fut\n\n\nclass PubSubClient(Service):\n    def __init__(self, loop, proto):\n        super().__init__(loop, proto)\n\n    def publish(self, topic):\n        \"\"\"Return object for dynamic PubSub calls.\n\n        The usage is:\n        await client.publish('my_topic').ns.func(1, 2)\n\n        topic argument may be None; otherwise it must be an instance of str or bytes\n        \"\"\"\n        return _MethodCall(self._proto, topic)\n\n\nclass PubSubService(Service):\n    def subscribe(self, topic):\n        \"\"\"Subscribe to the topic.\n\n        topic argument must be str or bytes.\n        Raises TypeError in other cases\n        \"\"\"\n        if isinstance(topic, bytes):\n            btopic = topic\n        elif isinstance(topic, str):\n            btopic = topic.encode(\"utf-8\")\n        else:\n            raise TypeError(\"topic should be str or bytes, got {!r}\".format(topic))\n        self.transport.subscribe(btopic)\n\n    def unsubscribe(self, topic):\n        \"\"\"Unsubscribe from the topic.\n\n        topic argument must be str or bytes.\n        Raises TypeError in other cases\n        \"\"\"\n        if isinstance(topic, bytes):\n            btopic = topic\n        elif isinstance(topic, str):\n            btopic = topic.encode(\"utf-8\")\n        else:\n            raise TypeError(\"topic should be str or bytes, got {!r}\".format(topic))\n        self.transport.unsubscribe(btopic)\n\n\nclass _MethodCall:\n\n    __slots__ = (\"_proto\", \"_topic\", \"_names\")\n\n    def __init__(self, proto, topic, names=()):\n        self._proto = proto\n        self._topic = topic\n        self._names = names\n\n    def __getattr__(self, name):\n        return self.__class__(self._proto, self._topic, self._names + (name,))\n\n    def __call__(self, *args, **kwargs):\n        if not self._names:\n            raise ValueError(\"PubSub method name is empty\")\n        return self._proto.call(self._topic, \".\".join(self._names), args, kwargs)\n\n\nclass _ServerProtocol(_BaseServerProtocol):\n    def msg_received(self, data):\n        btopic, bname, bargs, bkwargs = data\n\n        args = self.packer.unpackb(bargs)\n        kwargs = self.packer.unpackb(bkwargs)\n        try:\n            name = bname.decode(\"utf-8\")\n            func = self.dispatch(name)\n            args, kwargs = self.check_args(func, args, kwargs)\n        except (NotFoundError, ParametersError) as exc:\n            
fut = asyncio.Future()\n fut.set_exception(exc)\n else:\n if asyncio.iscoroutinefunction(func):\n fut = self.add_pending(func(*args, **kwargs))\n else:\n fut = asyncio.Future()\n try:\n fut.set_result(func(*args, **kwargs))\n except Exception as exc:\n fut.set_exception(exc)\n fut.add_done_callback(\n partial(self.process_call_result, name=name, args=args, kwargs=kwargs)\n )\n\n def process_call_result(self, fut, *, name, args, kwargs):\n self.discard_pending(fut)\n try:\n if fut.result() is not None:\n logger.warning(\"PubSub handler %r returned not None\", name)\n except asyncio.CancelledError:\n return\n except (NotFoundError, ParametersError) as exc:\n logger.exception(\"Call to %r caused error: %r\", name, exc)\n except Exception:\n self.try_log(fut, name, args, kwargs)\n", "repo_name": "aio-libs/aiozmq", "sub_path": "aiozmq/rpc/pubsub.py", "file_name": "pubsub.py", "file_ext": "py", "file_size_in_byte": 7438, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 414, "dataset": "github-code", "pt": "97", "api": [{"api_name": "asyncio.get_event_loop", "line_number": 36, "usage_type": "call"}, {"api_name": "aiozmq.create_zmq_connection", "line_number": 38, "usage_type": "call"}, {"api_name": "zmq.PUB", "line_number": 40, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 91, "usage_type": "call"}, {"api_name": "aiozmq.create_zmq_connection", "line_number": 93, "usage_type": "call"}, {"api_name": "zmq.SUB", "line_number": 102, "usage_type": "attribute"}, {"api_name": "collections.abc.Iterable", "line_number": 112, "usage_type": "argument"}, {"api_name": "base._BaseProtocol", "line_number": 119, "usage_type": "name"}, {"api_name": "base.ServiceClosedError", "line_number": 122, "usage_type": "call"}, {"api_name": "asyncio.Future", "line_number": 137, "usage_type": "call"}, {"api_name": "base.Service", "line_number": 142, "usage_type": "name"}, {"api_name": "base.Service", "line_number": 157, "usage_type": "name"}, {"api_name": "base._BaseServerProtocol", "line_number": 205, "usage_type": "name"}, {"api_name": "base.NotFoundError", "line_number": 215, "usage_type": "name"}, {"api_name": "base.ParametersError", "line_number": 215, "usage_type": "name"}, {"api_name": "asyncio.Future", "line_number": 216, "usage_type": "call"}, {"api_name": "asyncio.iscoroutinefunction", "line_number": 219, "usage_type": "call"}, {"api_name": "asyncio.Future", "line_number": 222, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 228, "usage_type": "call"}, {"api_name": "log.logger.warning", "line_number": 235, "usage_type": "call"}, {"api_name": "log.logger", "line_number": 235, "usage_type": "name"}, {"api_name": "asyncio.CancelledError", "line_number": 236, "usage_type": "attribute"}, {"api_name": "base.NotFoundError", "line_number": 238, "usage_type": "name"}, {"api_name": "base.ParametersError", "line_number": 238, "usage_type": "name"}, {"api_name": "log.logger.exception", "line_number": 239, "usage_type": "call"}, {"api_name": "log.logger", "line_number": 239, "usage_type": "name"}]} +{"seq_id": "24307243476", "text": "\"\"\"\nCreated on Feb 15, 2014\n\n@author: alex\n\"\"\"\n\nfrom sqlalchemy import Column\nfrom sqlalchemy.types import Float\nfrom sqlalchemy.types import Integer\nfrom sqlalchemy.types import SmallInteger\nfrom sqlalchemy.types import Unicode\nfrom sqlalchemy.orm import relationship\n\nfrom farmgui.models import Base\nfrom farmgui.models import InterpolationKnot\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport 
matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import interpolate\n\n\nclass SetpointInterpolation(Base):\n \"\"\"\n classdocs\n \"\"\"\n __tablename__ = 'SetpointInterpolations'\n\n _id = Column(SmallInteger, primary_key=True, autoincrement=True, nullable=False, unique=True)\n name = Column(Unicode(250))\n order = Column(SmallInteger, nullable=False)\n start_value = Column(Float(), nullable=False)\n end_time = Column(Integer(), nullable=False)\n end_value = Column(Float(), nullable=True)\n description = Column(Unicode(250), nullable=True)\n knots = relationship('InterpolationKnot', backref='interpolation')\n\n f = None\n\n def __init__(self, name, order, start_value, end_time, end_value, description):\n self.name = name\n self.order = order\n self.start_value = start_value\n self.end_time = end_time\n self.end_value = end_value\n self.description = description\n\n @property\n def id(self):\n return self._id\n\n def plot(self, y_axis_name, filename):\n fig = plt.figure(figsize=(5, 3))\n ax = fig.add_axes([0.15, 0.15, 0.8, 0.8])\n ax.set_xlabel('Time')\n ax.set_ylabel(y_axis_name, rotation='horizontal')\n ax.xaxis.grid(color='gray', linestyle='dashed')\n ax.yaxis.grid(color='gray', linestyle='dashed')\n x = []\n y = []\n x.append(0)\n y.append(self.start_value)\n for knot in self.knots:\n x.append(knot.time*self.end_time)\n y.append(knot.value)\n x.append(self.end_time)\n y.append(self.end_value)\n while len(x) <= self.order:\n # not enough knots\n x.append(self.end_time * len(x)/5.0)\n y.append(self.end_value)\n x_inter = np.linspace(0, self.end_time, 100)\n if self.order < 4:\n f = interpolate.interp1d(x, y, kind=self.order)\n y_inter = f(x_inter)\n else:\n f = interpolate.splrep(x, y)\n y_inter = interpolate.splev(x_inter, f)\n\n ax.set_xlim(0, self.end_time)\n ax.set_ylim(y_inter.min()-1, y_inter.max()+1)\n ax.plot(x, y, 'o', x_inter, y_inter, '-')\n fig.savefig(filename)\n\n def calculate_interpolation(self):\n x = []\n y = []\n x.append(0)\n y.append(self.start_value)\n for knot in self.knots:\n x.append(knot.time)\n y.append(knot.value)\n x.append(self.end_time)\n y.append(self.end_value)\n if self.order < 4:\n self.f = interpolate.interp1d(x, y, kind=self.order)\n else:\n self.f = interpolate.splrep(x, y)\n\n def get_value_at(self, interpolation_time):\n if self.f is None:\n self.calculate_interpolation()\n if self.order < 4:\n y = self.f([interpolation_time])[0]\n else:\n y = interpolate.splev([interpolation_time], self.f)[0]\n return round(y.item(), 2)\n\n\ndef init_setpoint_interpolations(db_session):\n h = 3600\n m = 60\n new_inter = SetpointInterpolation('Temperature Interpolation (long day)', 1, 20, 86400, 20, '...')\n new_inter.knots.append(InterpolationKnot(new_inter, 6*h, 20))\n new_inter.knots.append(InterpolationKnot(new_inter, 8*h, 25))\n new_inter.knots.append(InterpolationKnot(new_inter, 22*h, 25))\n db_session.add(new_inter)\n new_inter = SetpointInterpolation('Humidity Interpolation (long day)', 1, 70, 86400, 70, '...')\n new_inter.knots.append(InterpolationKnot(new_inter, 6*h, 70))\n new_inter.knots.append(InterpolationKnot(new_inter, 8*h, 50))\n new_inter.knots.append(InterpolationKnot(new_inter, 22*h, 50))\n db_session.add(new_inter)\n new_inter = SetpointInterpolation('Red Light Interpolation (long day)', 1, 0, 86400, 0, '...')\n new_inter.knots.append(InterpolationKnot(new_inter, 3*h, 0))\n new_inter.knots.append(InterpolationKnot(new_inter, 3*h+30*m, 100))\n new_inter.knots.append(InterpolationKnot(new_inter, 20*h+30*m, 100))\n 
new_inter.knots.append(InterpolationKnot(new_inter, 21*h, 0))\n db_session.add(new_inter)\n new_inter = SetpointInterpolation('Test Interpolation', 1, 0, 86400, 0, '...')\n new_inter.knots.append(InterpolationKnot(new_inter, 4*h, 100))\n new_inter.knots.append(InterpolationKnot(new_inter, 8*h, 0))\n new_inter.knots.append(InterpolationKnot(new_inter, 12*h, 100))\n new_inter.knots.append(InterpolationKnot(new_inter, 16*h, 0))\n new_inter.knots.append(InterpolationKnot(new_inter, 20*h, 100))\n db_session.add(new_inter)\n", "repo_name": "AlexanderLang/OpenAutomatedFarm", "sub_path": "FarmGUI/farmgui/models/SetpointInterpolation.py", "file_name": "SetpointInterpolation.py", "file_ext": "py", "file_size_in_byte": 4921, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "97", "api": [{"api_name": "matplotlib.use", "line_number": 18, "usage_type": "call"}, {"api_name": "farmgui.models.Base", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.types.SmallInteger", "line_number": 30, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.types.Unicode", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.types.SmallInteger", "line_number": 32, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.types.Float", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.types.Integer", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.types.Float", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.types.Unicode", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 75, "usage_type": "name"}, {"api_name": "scipy.interpolate.splrep", "line_number": 78, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 78, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 79, "usage_type": "name"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 97, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 97, "usage_type": "name"}, {"api_name": "scipy.interpolate.splrep", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 99, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 107, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 107, "usage_type": "name"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 115, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 
116, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 117, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 120, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 121, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 122, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 125, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 126, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 127, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 128, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 131, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 132, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 133, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 134, "usage_type": "call"}, {"api_name": "farmgui.models.InterpolationKnot", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "30855868023", "text": "# -*- coding: utf-8 -*-\nimport re\nfrom markdown import Extension\nfrom markdown.util import etree\nfrom collections import OrderedDict\nfrom markdown.preprocessors import Preprocessor\n\n\np = re.compile(\n r'('\n r'^(?P<youtube>https?\\://www\\.youtube\\.com/watch\\?v=(?P<youtube_vid>[^?]+))$'\n r'|'\n r'^(?P<youtube2><iframe.*?src=\"//www\\.youtube\\.com/embed/(?P<youtube2_vid>.+?)\".*?>\\s*</iframe>)$'\n r'|'\n r'^(?P<vimeo>https?\\://vimeo\\.com/(?P<vimeo_vid>\\d+))$'\n r'|'\n r'^(?P<vimeo2><iframe.*?src=\"//player\\.vimeo\\.com/video/(?P<vimeo2_vid>\\d+?)\".*?>\\s*</iframe>)$'\n r'|'\n r'^(?P<ted>https?\\://www\\.ted\\.com/(?P<ted_vid>talks/.+\\.html))$'\n r'|'\n r'^(?P<ted2><iframe.*?src=\"https?\\://embed\\.ted\\.com/(?P<ted2_vid>talks/.+\\.html)\".*?>\\s*</iframe>)$'\n r'|'\n r'^(?P<prezi>https?\\://prezi\\.com/(?P<prezi_vid>.+?)/.+?/#)$'\n r'|'\n r'^(?P<prezi2><iframe.*?src=\"https?\\://prezi\\.com/embed/(?P<prezi2_vid>.+?)/.+?\".*?>\\s*</iframe>)$'\n r'|'\n r'^(?P<slideshare><iframe.*?src=\"https?\\://www\\.slideshare\\.net/slideshow/embed\\_code/(?P<slideshare_vid>\\d+?)\".*?>\\s*</iframe>\\s*<div.+?</div>)$'\n r'|'\n r'^(?P<gcal><iframe.*?src=\"https?\\://www\\.google\\.com/calendar/embed\\?(?P<gcal_vid>.+?)\".*?>\\s*</iframe>)$'\n r'|'\n r'^(?P<googlemap>https?\\://maps\\.google\\.com/(?P<googlemap_vid>.+?))$'\n r'|'\n r'^(?P<googlemap2>https?\\://www\\.google\\.com/maps/(?P<googlemap2_vid>.+?))$'\n r'|'\n r'^(?P<googlemap3><iframe.*?src=\"https?\\://maps\\.google\\.com/(?P<googlemap3_vid>.+?)\".*?>\\s*</iframe>)$'\n r'|'\n r'^(?P<navermap><table.*?td.*?><a href=\"(?P<navermap_url>http://map.naver.com.*?)\"\\s+.*><img src=\"(?P<navermap_imgsrc>http://.*?map.naver.com.*?)\".*</a></td>.*</table>)$'\n r'|'\n r'^(?P<daummap><a href=\"(?P<daummap_url>http://map.daum.net.*?)\"\\s+.*<img.*src=\"(?P<daummap_imgsrc>http://map.*?.daum.net.*?)\"\\s+.*)$'\n r'|'\n r'^(?P<googless><iframe.*?src=\"https?\\://docs.google.com/spreadsheets/(?P<googless_vid>.+?)\".*?>\\s*</iframe>)$'\n r'|'\n r'^(?P<googless2>https?\\://docs.google.com/spreadsheets/(?P<googless2_vid>.+?))$'\n r')'\n)\n\n\nclass EmbedPrepreprocessor(Preprocessor):\n def run(self, lines):\n for i, line in enumerate(lines):\n m = p.search(line.strip())\n if m:\n lines[i] = self.process(m)\n 
return lines\n\n def process(self, m):\n if m.group('youtube'):\n return self._create_video(m, 'youtube', 640, 390, 'http://www.youtube.com/embed/%s')\n elif m.group('youtube2'):\n return self._create_video(m, 'youtube2', 640, 390, 'http://www.youtube.com/embed/%s')\n elif m.group('vimeo'):\n return self._create_video(m, 'vimeo', 500, 281, 'http://player.vimeo.com/video/%s')\n elif m.group('vimeo2'):\n return self._create_video(m, 'vimeo2', 500, 281, 'http://player.vimeo.com/video/%s')\n elif m.group('ted'):\n return self._create_video(m, 'ted', 560, 315, 'http://embed.ted.com/%s')\n elif m.group('ted2'):\n return self._create_video(m, 'ted2', 560, 315, 'http://embed.ted.com/%s')\n elif m.group('prezi'):\n return self._create_video(m, 'prezi', 550, 400, 'http://prezi.com/embed/%s/?bgcolor=ffffff&lock_to_path=0&autoplay=0&autohide_ctrls=0&features=undefined&disabled_features=undefined')\n elif m.group('prezi2'):\n return self._create_video(m, 'prezi2', 550, 400, 'http://prezi.com/embed/%s/?bgcolor=ffffff&lock_to_path=0&autoplay=0&autohide_ctrls=0&features=undefined&disabled_features=undefined')\n elif m.group('slideshare'):\n return self._create_video(m, 'slideshare', 425, 355, 'http://www.slideshare.net/slideshow/embed_code/%s')\n elif m.group('gcal'):\n return self._create_video(m, 'gcal', 800, 600, 'http://www.google.com/calendar/embed?%s')\n elif m.group('googlemap'):\n return self._create_video(m, 'googlemap', 425, 350, 'http://maps.google.com/%s&output=embed')\n elif m.group('googlemap2'):\n return self._create_video(m, 'googlemap2', 425, 350, 'http://www.google.com/maps/%s&output=embed')\n elif m.group('googlemap3'):\n return self._create_video(m, 'googlemap3', 425, 350, 'http://maps.google.com/%s')\n elif m.group('googless'):\n return self._create_video(m, 'googless', 640, 480, 'http://docs.google.com/spreadsheets/%s')\n elif m.group('googless2'):\n return self._create_video(m, 'googless2', 640, 480, 'http://docs.google.com/spreadsheets/%s?widget=true&headers=false')\n elif m.group('navermap'):\n return self._create_video_without_iframe(m, 'navermap', 460, 340)\n elif m.group('daummap'):\n return self._create_video_without_iframe(m, 'daummap', 500, 350)\n else:\n raise ValueError('Should not reach here')\n\n def _create_video_without_iframe(self, m, vtype, width, height):\n url = m.group('%s_url' % vtype)\n imgsrc = m.group('%s_imgsrc' % vtype)\n return \"<div class=\\\"video %s\\\"><a href=\\\"%s\\\"><img src=\\\"%s\\\"></a></div>\" % (vtype, url, imgsrc)\n\n def _create_video(self, m, vtype, width, height, url):\n vid = m.group('%s_vid' % vtype)\n url = url % vid\n url = url.replace('&amp;', '&')\n\n div = etree.Element('div')\n div.set('class', 'video %s' % vtype)\n iframe = etree.SubElement(div, 'iframe')\n iframe.set('allowfullscreen', 'true')\n iframe.set('frameborder', '0')\n iframe.set('width', str(width))\n iframe.set('height', str(height))\n iframe.set('scrolling', 'no')\n iframe.set('src', url)\n\n return etree.tostring(div)\n\n\nclass EmbedExtension(Extension):\n def extendMarkdown(self, md, md_globals):\n preprocessors = md.preprocessors.items()\n preprocessors.insert(0, ('embed', EmbedPrepreprocessor(md.parser)))\n md.preprocessors = OrderedDict(preprocessors)\n", "repo_name": "akngs/ecogwiki", "sub_path": "markdownext/md_embed.py", "file_name": "md_embed.py", "file_ext": "py", "file_size_in_byte": 5950, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 78, "dataset": "github-code", "pt": "94", "api": [{"api_name": "re.compile", "line_number": 9, 
"usage_type": "call"}, {"api_name": "markdown.preprocessors.Preprocessor", "line_number": 48, "usage_type": "name"}, {"api_name": "markdown.util.etree.Element", "line_number": 104, "usage_type": "call"}, {"api_name": "markdown.util.etree", "line_number": 104, "usage_type": "name"}, {"api_name": "markdown.util.etree.SubElement", "line_number": 106, "usage_type": "call"}, {"api_name": "markdown.util.etree", "line_number": 106, "usage_type": "name"}, {"api_name": "markdown.util.etree.tostring", "line_number": 114, "usage_type": "call"}, {"api_name": "markdown.util.etree", "line_number": 114, "usage_type": "name"}, {"api_name": "markdown.Extension", "line_number": 117, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "43141660461", "text": "# IMPORT LIBRARIES\n\nimport gym\nimport numpy as np\nimport glfw\nimport random\nimport pickle\n# CONFIGURE HYPERPARAMETERS\n\n# ACTION_LEN = 1000\n# FIRST_GENERATION_SIZE = 500\n# NEXT_GENERATION_SIZE = 200\n# NUM_GENERATIONS = 30\n# DECIMAL_PERISH = 0.6\n# CHANCE_OF_MUTATION = 0.05\n\nACTION_LEN = 1000\nFIRST_GENERATION_SIZE = 1000\n\nDEFAULT_NEXT_GENERATION_SIZE = 500\nNEXT_GENERATION_SIZE = DEFAULT_NEXT_GENERATION_SIZE\n\nNUM_GENERATIONS = 100000\nDECIMAL_PERISH = 0.6\n\nDEFAULT_CHANCE_OF_MUTATION = 0.25\nCHANCE_OF_MUTATION = DEFAULT_CHANCE_OF_MUTATION\n\nPREVIOUS_MAX_REWARD = 0\nNUM_EQUAL_MAX_REWARDS = 0\n\n# ENVIRONMENT SETUP\nnp.random.seed(2)\nrandom.seed(2)\n# env = gym.make('Ant-v2')\nenv = gym.make('Humanoid-v2')\n# env = gym.make('HalfCheetah-v2')\nobs = env.reset()\nobs_dim = env.observation_space.shape[0]\nact_dim = env.action_space.shape[0]\n\nprint(obs_dim, act_dim)\n#env.render()\n\nf = open(\"results.txt\", \"w\")\nf.close()\n\nclass Candidate:\n num_instances = 1\n\n def __init__(self, action_len, gen):\n self.action_len = action_len\n self.gen = gen\n self.cand_num = Candidate.num_instances\n self.reward = 0\n self.moves = []\n Candidate.num_instances += 1\n \n def set_reward(self, reward):\n self.reward = reward\n \n def add_move(self, move):\n self.moves.append(move)\n \n def __str__(self):\n cand_str = \"Candidate {cand_num}, Generation {gen}, Reward {reward}\\n\\n\"\n return cand_str.format(cand_num=self.cand_num, gen=self.gen, reward=self.reward)\n \n def __add__(self, other):\n # mates self with other, produces offspring\n offspring = Candidate(self.action_len, self.gen + 1)\n \n split_point = (int) (self.action_len/2)\n \n # coin flip\n \n \n if (np.random.choice([True, False])):\n for i in range(self.action_len):\n mutate = True if (random.random() <= CHANCE_OF_MUTATION) else False\n\n if (mutate):\n #print(\"MUTATING\")\n offspring.add_move(generate_move())\n else:\n if (i > split_point):\n offspring.add_move(self.moves[i])\n else:\n offspring.add_move(other.moves[i])\n else:\n for i in range(self.action_len):\n mutate = True if (random.random() <= CHANCE_OF_MUTATION) else False\n\n if (mutate):\n #print(\"MUTATING\") \n offspring.add_move(generate_move())\n else: \n if (i > split_point):\n offspring.add_move(other.moves[i])\n else:\n offspring.add_move(self.moves[i]) \n return offspring\n# def __add__(self, other):\n# # mates self with other, produces offspring\n# offspring = Candidate(self.action_len, self.gen + 1)\n \n# split_point = (int) (self.action_len/2)\n \n# # coin flip\n \n \n# if (np.random.choice([True, False])):\n# for i in range(self.action_len):\n# mutate = True if (random.random() <= CHANCE_OF_MUTATION) else False\n\n# if (mutate):\n# 
#print(\"MUTATING\")\n# offspring.add_move(generate_move())\n# else:\n# if (i % 4 == 0 or i % 4 == 1):\n# offspring.add_move(self.moves[i])\n# else:\n# offspring.add_move(other.moves[i])\n# else:\n# for i in range(self.action_len):\n# mutate = True if (random.random() <= CHANCE_OF_MUTATION) else False\n\n# if (mutate):\n# #print(\"MUTATING\") \n# offspring.add_move(generate_move())\n# else: \n# if (i % 4 == 0 or i % 4 == 1):\n# offspring.add_move(other.moves[i])\n# else:\n# offspring.add_move(self.moves[i]) \n# return offspring \n \ndef generate_move():\n return np.random.uniform(low=-1, high=1, size=act_dim)\n\n\ndef generate_candidate():\n\n initialCandidate = Candidate(ACTION_LEN, 0)\n for i in range(initialCandidate.action_len):\n move = generate_move()\n initialCandidate.add_move(move)\n return initialCandidate\n\ndef generate_first_generation():\n population = []\n for i in range(FIRST_GENERATION_SIZE):\n population.append(generate_candidate())\n return population\n\n# def generate_first_generation():\n# population = []\n# cand = generate_candidate()\n# for i in range(FIRST_GENERATION_SIZE):\n# population.append(cand)\n# return population\n\ndef perform_natural_selection(current_population):\n parents = []\n NUM_PERISH = (int) (NEXT_GENERATION_SIZE * DECIMAL_PERISH)\n \n sorted_by_reward = sorted(current_population, key=lambda cand: cand.reward) \n #print(\"SORTED LENGTH\", len(sorted_by_reward))\n \n #for i in range(len(sorted_by_reward)):\n # print(sorted_by_reward[i])\n selected = sorted_by_reward[NUM_PERISH:]\n #print(\"SELECTED LENGTH\", len(selected))\n #for i in range(len(selected)):\n # print(selected[i]) \n best = selected[-1]\n #print(\"BEST\", best)\n num_to_add = 1\n for candidate in selected:\n for i in range(num_to_add):\n parents.append(candidate)\n num_to_add += 1\n #print(\"PARENTS LENGTH\", len(parents))\n random.shuffle(parents)\n return parents, best\n\ndef create_offspring(parents, best):\n offspring = []\n offspring.append(best)\n \n for i in range(NEXT_GENERATION_SIZE-1):\n f_parent_ind = random.randint(0, len(parents) - 1)\n s_parent_ind = random.randint(0, len(parents) - 1)\n \n f_parent = parents[f_parent_ind]\n s_parent = parents[s_parent_ind]\n \n child = f_parent + s_parent\n #print(type(child))\n offspring.append(child)\n\n return offspring\n\ndef update_params_after_gen():\n global NEXT_GENERATION_SIZE\n global CHANCE_OF_MUTATION\n NEXT_GENERATION_SIZE = (int) (NEXT_GENERATION_SIZE * 0.8)\n if (NEXT_GENERATION_SIZE < 200):\n NEXT_GENERATION_SIZE = 250\n CHANCE_OF_MUTATION = CHANCE_OF_MUTATION * 0.8\n\ndef update_after_stagnation():\n # update mutation rate after stagnation\n global CHANCE_OF_MUTATION\n global DEFAULT_CHANCE_OF_MUTATION\n global NEXT_GENERATION_SIZE\n global DEFAULT_NEXT_GENERATION_SIZE \n\n CHANCE_OF_MUTATION = CHANCE_OF_MUTATION * 2\n NEXT_GENERATION_SIZE = NEXT_GENERATION_SIZE * 2\n \n if (CHANCE_OF_MUTATION > DEFAULT_CHANCE_OF_MUTATION):\n CHANCE_OF_MUTATION = DEFAULT_CHANCE_OF_MUTATION/2\n \n if (NEXT_GENERATION_SIZE > DEFAULT_NEXT_GENERATION_SIZE):\n NEXT_GENERATION_SIZE = DEFAULT_NEXT_GENERATION_SIZE\n\ndef evolve():\n global PREVIOUS_MAX_REWARD\n global NUM_EQUAL_MAX_REWARD\n print('START EVOLUTION')\n population = generate_first_generation()\n \n for generation in range(NUM_GENERATIONS):\n for candidate in population:\n env.seed(1)\n env.reset()\n\n total_reward = 0\n cand_done = False\n moves_taken = 0\n while (not cand_done):\n # while ((not cand_done) and (moves_taken < candidate.action_len)):\n #env.render()\n move = 
candidate.moves[moves_taken].reshape((1,-1)).astype(np.float32)\n obs, reward, done, _ = env.step(np.squeeze(move, axis=0))\n cand_done = done\n total_reward += reward\n moves_taken += 1\n #print(moves_taken, candidate.action_len, len(candidate.moves))\n candidate.set_reward(total_reward)\n #env.reset_model()\n\n parents, best = perform_natural_selection(population)\n population = create_offspring(parents, best)\n \n if (PREVIOUS_MAX_REWARD == best.reward):\n NUM_EQUAL_MAX_REWARD += 1\n else:\n PREVIOUS_MAX_REWARD = best.reward\n NUM_EQUAL_MAX_REWARD = 0\n \n if (NUM_EQUAL_MAX_REWARD >= 3):\n update_after_stagnation()\n \n\n\n \n update_params_after_gen()\n #sorted_by_reward = sorted(population, key=lambda cand: cand.reward)\n #print(\"best of offspring\", sorted_by_reward[-1])\n\n print('GENERATION : ', generation, ', BEST : ', best)\n f = open(\"results.txt\", \"a\")\n strout = str(best.cand_num) + \":\" + str(best.reward) + \"\\n\"\n f.write(strout)\n f.close()\n with open('bestAction.pkl', 'wb') as bestAction:\n pickle.dump(best.moves, bestAction)\n\n\n\n print('EVOLUTION HAS FINISHED')\n \nevolve()\nenv.close()\nglfw.terminate()", "repo_name": "ndriker/mudocoProject", "sub_path": "ga/nesterov_og_genetics.py", "file_name": "nesterov_og_genetics.py", "file_ext": "py", "file_size_in_byte": 8953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "numpy.random.seed", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 34, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 78, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 80, "usage_type": "call"}, {"api_name": "random.random", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 184, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 192, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 246, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 279, "usage_type": "call"}, {"api_name": "glfw.terminate", "line_number": 287, "usage_type": "call"}]} +{"seq_id": "21050024702", "text": "import requests\nimport re\nimport os\n#需求:爬取糗事百科中糗图热图板块下所有的热图图片\nif __name__ == \"__main__\":\n #创建一个文件夹,保存所有的图片\n if not os.path.exists('./rtLibs'):\n os.mkdir('./rtLibs')\n\n url = 'https://www.qiushibaike.com/imgrank/'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'\n\n }\n #使用通用爬虫对url对应的一整张页面进行爬取\n page_text = requests.get(url=url,headers=headers).content.decode('utf-8')\n #print(page_text)\n #使用聚焦爬虫将页面中所有的糗图进行解析/提取\n ex = '<div class=\"thumb\">.*?<img src=\"(.*?)\" alt.*?</div>'\n\n img_src_list = re.findall(ex,page_text,re.S)\n # print(img_src_list)\n for src in img_src_list:\n print(src)\n #拼接出一个完整的图片url\n src = 'https:'+src\n #请求到了图片的二进制数据\n img_data = 
requests.get(url=src,headers=headers).content\n #生成图片名称\n img_name = src.split('/')[-1]\n #图片存储的路径\n imgPath = './rtLibs/'+img_name\n with open(imgPath,'wb') as fp:\n fp.write(img_data)\n print(img_name,'下载成功!!!')", "repo_name": "531207502/code_save", "sub_path": "python-pc/2021-08-23爬虫/三.数据解析章节/6基础-正则表达式/正则表达式爬取糗图图片.py", "file_name": "正则表达式爬取糗图图片.py", "file_ext": "py", "file_size_in_byte": 1336, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "os.path.exists", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "re.S", "line_number": 21, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "17911924357", "text": "# Script for back-end by Wei Wei Wen, 2018\n#\n# GOAL: To fetch data from Bitcoin API (15 mins)\n\n#!/home/ec2-user/softwares/python3.7/bin/python3\n# Import libraries\nfrom datetime import date\nimport time\nimport requests\nimport json\nimport os\n\napi_url = \"https://blockchain.info/ticker\"\nresp = requests.get(api_url)\n\n#Define function get_task\ndef get_task():\n return requests.post(api_url)\ntask = get_task()\ntask.json()\n\n#Define function get_filename_datetime\ndef get_filename_datetime():\n return \"bitcoin-\" + time.strftime('%Y%m%d%H%M%S') + \".csv\"\nfilename = get_filename_datetime()\n\nif os.path.exists('./bitcoin_files_15mins'):\n os.chdir('./bitcoin_files_15mins')\nelse:\n os.mkdir('./bitcoin_files_15mins')\n os.chdir('./bitcoin_files_15mins')\n\nwith open(filename,\"w+\") as file:\n file.write(\"Currency,Buy,Sell,Symbol\\n\")\n for key,value in task.json().items():\n file.write(key + \",\")\n for key,value in value.items():\n if(key == \"buy\"):\n file.write(str(value) + \",\")\n if(key == \"sell\"):\n file.write(str(value) + \",\")\n if(key == \"symbol\"):\n file.write(value)\n file.write(\"\\n\")\nfile.close()\n", "repo_name": "rockmanvnx6/BitStat", "sub_path": "server/get_data_from_bitcoin_15mins.py", "file_name": "get_data_from_bitcoin_15mins.py", "file_ext": "py", "file_size_in_byte": 1237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "94", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 18, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "3016727160", "text": "import textwrap\n\nfrom boddle import boddle # type: ignore [import]\nfrom webtest import TestApp # type: ignore [import]\n\nfrom liscopridge.app import root\n\n\ndef test_hello():\n with boddle():\n assert root.hello() == \"Hello World\"\n assert root.hello(\"Tom\") == \"Hello Tom\"\n\n with boddle(params={'name': \"Tom\"}):\n assert root.hello() == \"Hello Tom\"\n\n webapp = TestApp(root.app)\n assert str(webapp.get(\"/hello/Tom\")) == 
textwrap.dedent(\"\"\"\n Response: 200 OK\n Content-Type: text/plain; charset=UTF-8\n Hello Tom\n \"\"\").strip('\\n')\n assert webapp.get(\"/hello?name=Tom\").body == b\"Hello Tom\"\n", "repo_name": "liskin/liscopridge", "sub_path": "tests/test_hello.py", "file_name": "test_hello.py", "file_ext": "py", "file_size_in_byte": 646, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "boddle.boddle", "line_number": 10, "usage_type": "call"}, {"api_name": "liscopridge.app.root.hello", "line_number": 11, "usage_type": "call"}, {"api_name": "liscopridge.app.root", "line_number": 11, "usage_type": "name"}, {"api_name": "liscopridge.app.root.hello", "line_number": 12, "usage_type": "call"}, {"api_name": "liscopridge.app.root", "line_number": 12, "usage_type": "name"}, {"api_name": "boddle.boddle", "line_number": 14, "usage_type": "call"}, {"api_name": "liscopridge.app.root.hello", "line_number": 15, "usage_type": "call"}, {"api_name": "liscopridge.app.root", "line_number": 15, "usage_type": "name"}, {"api_name": "webtest.TestApp", "line_number": 17, "usage_type": "call"}, {"api_name": "liscopridge.app.root.app", "line_number": 17, "usage_type": "attribute"}, {"api_name": "liscopridge.app.root", "line_number": 17, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "72652164149", "text": "import pygame\nimport os, sys\n\nwidth, height = 400, 400\nos.environ['SDL_VIDEO_CENTERED'] = '1'\nscreen = pygame.display.set_mode((width, height))\n\nclass Core(object):\n def __init__(self, surface, name):\n pygame.display.set_caption(name)\n self.screen = surface\n \n def dispatch(self, event):\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n sys.exit() \n elif event.type == pygame.MOUSEBUTTONDOWN:\n pass\n \n def run(self):\n while True:\n for event in pygame.event.get():\n self.dispatch(event)\n \n self.screen.fill([0xFF, 0xFF, 0xFF])\n pygame.display.flip()\n\nif __name__ == '__main__':\n main = Core(screen, 'Node')\n main.run()\n", "repo_name": "gistable/gistable", "sub_path": "dockerized-gists/3252374/snippet.py", "file_name": "snippet.py", "file_ext": "py", "file_size_in_byte": 865, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 74, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 27, 
"usage_type": "attribute"}]} +{"seq_id": "36457669420", "text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport os\nimport ssl\nimport sys\nimport time\nimport requests\nimport undetected_chromedriver as uc\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\n#from pyvirtualdisplay import Display\nfrom helium import *\n\n# 关闭证书验证\nssl._create_default_https_context = ssl._create_unverified_context\n\ntry:\n\tTG_BOT_TOKEN = os.environ['TG_BOT_TOKEN']\nexcept:\n\t# 本地调试用\n\tTG_BOT_TOKEN = ''\n\ntry:\n\tTG_USER_ID = os.environ['TG_USER_ID']\nexcept:\n\t# 本地调试用\n\tTG_USER_ID = ''\n\ntry:\n\tEMAIL = os.environ['EMAIL']\nexcept:\n\t# 本地调试用\n\tEMAIL = ''\n\ntry:\n\tPWD = os.environ['PWD']\nexcept:\n\t# 本地调试用\n\tPWD = ''\n\npolicy={\n\n }\n\ndef urlDecode(s):\n\treturn str(base64.b64decode(s + '=' * (4 - len(s) % 4))).split('\\'')[1]\n\ndef delay(i):\n\ttime.sleep(i)\n\ndef push(body):\n\tprintRealTime('- waiting for push result')\n\t# tg push\n\tif TG_BOT_TOKEN == '' or TG_USER_ID == '':\n\t\tprintRealTime('*** No TG_BOT_TOKEN or TG_USER_ID ***')\n\telse:\n\t\tserver = 'https://api.telegram.org'\n\t\ttgurl = server + '/bot' + TG_BOT_TOKEN + '/sendMessage'\n\t\trq_tg = requests.post(tgurl, data={'chat_id': TG_USER_ID, 'text': body}, headers={\n\t\t\t'Content-Type': 'application/x-www-form-urlencoded'})\n\t\tif rq_tg.status_code == 200:\n\t\t\tprintRealTime('- tg push Done!')\n\t\telse:\n\t\t\tprintRealTime('*** tg push fail! ***', rq_tg.content.decode('utf-8'))\n\tprintRealTime('- finish!')\n\t# kill_browser()\n\ndef login():\n\tprintRealTime('开始登录')\n\tdelay(2)\n\twrite(EMAIL, into=S('#login-username'))\n\tdelay(2)\n\twrite(PWD, into=S('#login-password'))\n\tdelay(2)\n\tclick('登录')\n\ndef getCountry():\n\tdelay(2)\n\tgo_to(urlCntentPage)\n\tdelay(2)\n\ttable = driver.find_element(By.CLASS_NAME, 'Table__TableElement-sc-evwssh-0.dIoJPZ')\n\ttable_rows = table.find_elements(By.TAG_NAME, 'tr')\n\tprintRealTime('***总行数:'+str(len(table_rows)))\n\t\ndef printRealTime(msg):\n\tprint(msg, flush=True)\n\n\n##\nurlLogin = urlDecode('aHR0cHM6Ly9hY2NvdW50cy5zcG90aWZ5LmNvbS96aC1DTi9sb2dpbg==')\nurlCntentPage = urlDecode('aHR0cHM6Ly93d3cuc3BvdGlmeS5jb20vaGstemgvYWNjb3VudC9vdmVydmlldy8=')\n\n##\nblock = False\n# robot = 0\n#display = Display(visible=0, size=(800, 800))\n#display.start()\nprintRealTime('- loading...')\ndriver = uc.Chrome( use_subprocess=True)\ndriver.set_window_size(785, 650)\ndelay(2)\nset_driver(driver)\nprintRealTime('- 完成初始化...')\n\nr = requests.get(\"http://ip.p3terx.com\")\nprintRealTime(str(r.text))\n\ngo_to(urlLogin)\nlogin()\ngetCountry()\n", "repo_name": "yeslight/seleniumPractice", "sub_path": "spotifyLogin.py", "file_name": "spotifyLogin.py", "file_ext": "py", "file_size_in_byte": 2493, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "ssl._create_default_https_context", "line_number": 16, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, 
{"api_name": "requests.post", "line_number": 60, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 82, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 82, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 83, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 83, "usage_type": "name"}, {"api_name": "undetected_chromedriver.Chrome", "line_number": 100, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "29175436011", "text": "import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-darkgrid')\nimport argparse\nimport csv\nimport pandas\nimport os\nimport sys\nimport pickle\nimport numpy as np\nfrom os.path import join\n\n# matplotlib\ntitlesize = 33\nxsize = 30\nysize = 30\nticksize = 25\nlegendsize = 25\nerror_region_alpha = 0.25\n\n\ndef smoothed(x, w):\n \"\"\"Smooth x by averaging over sliding windows of w, assuming sufficient length.\n \"\"\"\n if len(x) <= w:\n return x\n smooth = []\n for i in range(1, w):\n smooth.append( np.mean(x[0:i]) )\n for i in range(w, len(x)+1):\n smooth.append( np.mean(x[i-w:i]) )\n assert len(x) == len(smooth), \"lengths: {}, {}\".format(len(x), len(smooth))\n return np.array(smooth)\n\n\ndef _get_stuff_from_monitor(mon):\n \"\"\"Get stuff from `monitor` log files.\n\n Monitor files are named `0.envidx.monitor.csv` and have one line for each\n episode that finished in that CPU 'core', with the reward, length (number\n of steps) and the time (in seconds). The lengths are not cumulative, but\n time is cumulative.\n \"\"\"\n scores = []\n steps = []\n times = []\n with open(mon, 'r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for csv_row in csv_reader:\n # First two lines don't contain interesting stuff.\n if line_count == 0 or line_count == 1:\n line_count += 1\n continue\n scores.append(float(csv_row[0]))\n steps.append(int(csv_row[1]))\n times.append(float(csv_row[2]))\n line_count += 1\n print(\"finished: {}\".format(mon))\n return scores, steps, times\n\n\ndef plot(args):\n \"\"\"Load monitor curves and the progress csv file. 
And plot from those.\n \"\"\"\n nrows, ncols = 1, 2\n fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharey=True, figsize=(11*ncols,7*nrows))\n title = args.title\n\n # Global statistics across all monitors\n scores_all = []\n steps_all = []\n times_all = []\n total_train_steps = 0\n train_hours = 0\n\n monitors = sorted(\n [x for x in os.listdir(args.path) if 'monitor.csv' in x and '.swp' not in x]\n )\n progfile = join(args.path,'progress.csv')\n\n # First row, info from all the monitors, i.e., number of CPUs.\n for env_idx,mon in enumerate(monitors):\n monitor_path = join(args.path, mon)\n scores, steps, times = _get_stuff_from_monitor(monitor_path)\n\n # Now process to see as a function of episodes and training steps, etc.\n num_episodes = len(scores)\n tr_episodes = np.arange(num_episodes)\n tr_steps = np.cumsum(steps)\n tr_times = np.array(times) / 60.0 # get it in minutes\n\n # Plot for individual monitors.\n envlabel = 'env {}'.format(env_idx)\n sm_10 = smoothed(scores, w=10)\n ax[0,0].plot(tr_steps, sm_10, label=envlabel+'; avg {:.1f} last {:.1f}'.format(\n np.mean(sm_10), sm_10[-1]))\n sm_100 = smoothed(scores, w=100)\n ax[0,1].plot(tr_times, sm_100, label=envlabel+'; avg {:.1f} last {:.1f}'.format(\n np.mean(sm_100), sm_100[-1]))\n\n # Handle global stuff.\n total_train_steps += tr_steps[-1]\n train_hours = max(train_hours, tr_times[-1] / 60.0)\n\n # Bells and whistles\n for row in range(nrows):\n for col in range(ncols):\n ax[row,col].set_ylabel(\"Scores\", fontsize=30)\n ax[row,col].tick_params(axis='x', labelsize=25)\n ax[row,col].tick_params(axis='y', labelsize=25)\n leg = ax[row,col].legend(loc=\"best\", ncol=1, prop={'size':25})\n for legobj in leg.legendHandles:\n legobj.set_linewidth(5.0)\n ax[0,0].set_title(title+', Smoothed (w=10)', fontsize=titlesize)\n ax[0,0].set_xlabel(\"Train Steps (total {})\".format(total_train_steps), fontsize=xsize)\n ax[0,1].set_title(title+', Smoothed (w=100)', fontsize=titlesize)\n ax[0,1].set_xlabel(\"Train Time (in Hours {:.2f})\".format(train_hours), fontsize=xsize)\n plt.tight_layout()\n figname = '{}.png'.format(title)\n plt.savefig(figname)\n print(\"\\nJust saved: {}\".format(figname))\n\n\nif __name__ == \"__main__\":\n pp = argparse.ArgumentParser()\n pp.add_argument('--path', type=str)\n pp.add_argument('--title', type=str)\n args = pp.parse_args()\n plot(args)", "repo_name": "mit-gfx/PGMORL", "sub_path": "externals/baselines/draw.py", "file_name": "draw.py", "file_ext": "py", "file_size_in_byte": 4368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 86, "dataset": "github-code", "pt": "97", "api": [{"api_name": "matplotlib.use", "line_number": 2, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 4, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 4, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "23383758188", "text": "import inspect\nfrom typing import Any, Callable\n\nimport pydantic\nfrom docstring_parser import parse\nfrom pydantic.decorator import ValidatedFunction\n\nfrom .pydantic_parser import PydanticParser\n\n\ndef clean_docstring(docstring: str) -> str:\n \"\"\"Clean up docstring before sending to OpenAI.\"\"\"\n output = []\n for line in docstring.split(\"\\n\"):\n output.append(line.strip())\n\n return \"\\n\".join(output)\n\n\ndef format_call_line(func: Callable[..., Any], *args: Any, **kwargs: Any) -> str:\n args_list = [repr(arg) for arg in args] # Convert arguments to their string representations\n kwargs_list = [f\"{key}={repr(value)}\" for key, value in kwargs.items()] # Convert keyword arguments to string representations\n\n # Combine args and kwargs into a single list\n all_args = args_list + kwargs_list\n\n # Generate the formatted function call line\n call_line = f\"{func.__name__}({', '.join(all_args)})\"\n\n return call_line\n\n\nclass FunctionSignature:\n \"\"\"A helper class to parse function signature and generate instruction for prompt.\"\"\"\n\n def __init__(self, fn: Callable[..., Any]):\n self.fn = fn\n self.sig: inspect.Signature = inspect.signature(fn)\n return_annotation = self.sig.return_annotation\n\n assert return_annotation is not inspect.Signature.empty, f\"Function {fn.__name__} must have return annotation\"\n\n class UnpackModel(pydantic.BaseModel):\n ret: return_annotation # type: ignore[valid-type]\n\n self.parser = PydanticParser[UnpackModel](pydantic_model=UnpackModel)\n\n def function_line(self) -> str:\n # NOTE: return type is instrcutive by parser\n f = str(self.sig.replace(return_annotation=inspect.Signature.empty))\n\n return f\"def {self.fn.__name__}{f}:\"\n\n def description(self) -> str:\n return clean_docstring(self.fn.__doc__ or \"\")\n\n def call_line(self, *args: Any, **kwargs: Any) -> str:\n return format_call_line(self.fn, *args, **kwargs)\n\n def locals(self, *args: Any, **kwargs: Any) -> dict[str, Any]:\n return inspect.getcallargs(self.fn, *args, **kwargs)\n\n def parse(self, text: str) -> Any:\n return self.parser.parse(text).ret\n\n def instruction(self) -> str:\n return clean_docstring(\n f\"\"\"You are now the following python function:\n ```\n # {self.description()}\n {self.function_line()}\n ```\n Only respond with your `return` value.\n {self.parser.get_format_instructions()}\n \"\"\"\n )\n\n def schema(self) -> dict[str, Any]:\n vd = ValidatedFunction(self.fn, None)\n doc_string = parse(self.description())\n descriptions = (doc_string.short_description, doc_string.long_description)\n description = \"\\n\\n\".join(filter(None, descriptions))\n\n param_desc = {k.arg_name: k.description for k in 
doc_string.params}\n\n def filter_parameter(parameter: dict[str, Any]) -> dict[str, Any]:\n properties = {}\n\n for k, v in parameter[\"properties\"].items():\n if k in {\"args\", \"kwargs\", \"v__duplicate_kwargs\"}:\n continue\n\n if k in param_desc and \"description\" not in v:\n v[\"description\"] = param_desc[k]\n\n properties[k] = v\n\n # NOTE: remove unnecessary keys\n return {\n \"type\": parameter[\"type\"],\n \"required\": parameter.get(\"required\", []),\n \"properties\": properties,\n \"definitions\": parameter.get(\"definitions\", {}),\n }\n\n return {\n \"name\": self.fn.__name__,\n \"description\": description,\n \"parameters\": filter_parameter(vd.model.schema()),\n }\n", "repo_name": "livingbio/gpt-fn", "sub_path": "src/gpt_fn/utils/signature.py", "file_name": "signature.py", "file_ext": "py", "file_size_in_byte": 3827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "94", "api": [{"api_name": "typing.Callable", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 36, "usage_type": "name"}, {"api_name": "inspect.Signature", "line_number": 38, "usage_type": "attribute"}, {"api_name": "inspect.signature", "line_number": 38, "usage_type": "call"}, {"api_name": "inspect.Signature", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pydantic_parser.PydanticParser", "line_number": 46, "usage_type": "name"}, {"api_name": "inspect.Signature", "line_number": 50, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 60, "usage_type": "name"}, {"api_name": "inspect.getcallargs", "line_number": 61, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 63, "usage_type": "name"}, {"api_name": "pydantic.decorator.ValidatedFunction", "line_number": 79, "usage_type": "call"}, {"api_name": "docstring_parser.parse", "line_number": 80, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "5613658676", "text": "# (c) Nelen & Schuurmans. 
GPL licensed, see LICENSE.rst.\n\nimport logging\nfrom functools import lru_cache\n\nimport numpy as np\n\ntry:\n import pyproj\nexcept ImportError:\n pyproj = None\n\ntry:\n from osgeo import osr\nexcept ImportError:\n osr = None\n\ntry:\n import mercantile\nexcept ImportError:\n mercantile = None\n\ntry:\n import shapely\n from shapely.strtree import STRtree\n from shapely.wkt import loads\nexcept ImportError:\n shapely = None\n\nfrom threedigrid.numpy_utils import select_lines_by_bbox\n\nlogger = logging.getLogger(__name__)\n\nMERCANTILE_EPSG_CODE = \"4326\"\n\nBBOX_LEFT = 0\nBBOX_TOP = 1\nBBOX_RIGHT = 2\nBBOX_BOTTOM = 3\n\n\ndef raise_import_exception(name):\n raise ImportError(\n \"Could not import {}, you need to install threedigrid \"\n \"with the extra [geo], e.g \"\n \"pip install threedigrid[geo]==<version>\".format(name)\n )\n\n\n@lru_cache(10)\ndef get_transformer(source_epsg, target_epsg):\n if pyproj is None:\n raise_import_exception(\"pyproj\")\n\n return pyproj.Transformer.from_crs(\n pyproj.CRS.from_epsg(int(source_epsg)),\n pyproj.CRS.from_epsg(int(target_epsg)),\n always_xy=True,\n )\n\n\ndef transform_xys(x_array, y_array, source_epsg, target_epsg):\n \"\"\"\n Transform x_array, y_array from source_epsg_code to\n target_epsg code\n \"\"\"\n transformer = get_transformer(source_epsg, target_epsg)\n assert isinstance(x_array, np.ndarray)\n assert isinstance(y_array, np.ndarray)\n\n if x_array.size == 0 and y_array.size == 0:\n return np.array([[], []])\n\n reprojected = transformer.transform(x_array, y_array)\n return np.array(reprojected)\n\n\ndef get_spatial_reference(epsg_code):\n \"\"\"\n :param epsg_code: Spatial Reference System Identifier (SRID)\n as implemented by the European Petroleum Survey Group (EPSG).\n\n :return: an osr spatial reference instance\n\n :raise RuntimeError: if the import from epsg_code failed\n\n \"\"\"\n if osr is None:\n raise_import_exception(\"osr\")\n\n sr = osr.SpatialReference()\n try:\n sr.ImportFromEPSG(int(epsg_code))\n return sr\n except RuntimeError:\n logger.exception(\n \"[-] Importing projection from epsg code %s failed.\" % epsg_code\n )\n raise\n\n\ndef transform_bbox(bbox, source_epsg_code, target_epsg_code, all_coords=False):\n \"\"\"\n Transform bbox from source_epsg_code to target_epsg_code,\n if necessary\n\n :returns np.array of shape 4 which represent the two coordinates:\n left, bottom and right, top.\n When `all_coords` is set to `True`, a np.array of shape 8 is given\n which represents coords of the bbox in the following order:\n left top, right top, right bottom, left bottom\n \"\"\"\n if source_epsg_code != target_epsg_code:\n # XXX: Not entirely sure whether transformations between two projected\n # coordinate systems always do retain the rectangular shape of a bbox.\n # Transformations between an unprojected system (e.g. WGS84) and a\n # projected system (e.g. 
RDNEW) will experience distortion: the\n # resulting shape cannot be accurately represented by top left\n # and bottom right.\n source_srs = get_spatial_reference(source_epsg_code)\n target_srs = get_spatial_reference(target_epsg_code)\n if source_srs.IsProjected() != target_srs.IsProjected():\n msg = \"Transforming a bbox from %s to %s is inaccurate.\"\n logger.warning(msg, source_epsg_code, target_epsg_code)\n # Transform to [[left, right],[top, bottom]]\n input_x = [bbox[BBOX_LEFT], bbox[BBOX_RIGHT]]\n input_y = [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]]\n if all_coords:\n input_x += [bbox[BBOX_RIGHT], bbox[BBOX_LEFT]]\n input_y += [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]]\n bbox_trans = np.array(\n transform_xys(\n np.array(input_x), np.array(input_y), source_epsg_code, target_epsg_code\n )\n )\n\n if all_coords:\n bbox = np.array(\n [\n bbox_trans[0][0],\n bbox_trans[1][0], # left_top\n bbox_trans[0][2],\n bbox_trans[1][2], # right_top\n bbox_trans[0][1],\n bbox_trans[1][1], # right_bottom\n bbox_trans[0][3],\n bbox_trans[1][3], # left_bottom\n ]\n )\n else:\n # Transform back to [left,bottom,right,top]\n bbox = np.array(\n [\n min(bbox_trans[0]),\n min(bbox_trans[1]), # left_bottom\n max(bbox_trans[0]),\n max(bbox_trans[1]), # right_top\n ]\n )\n return bbox\n\n\ndef get_bbox_for_tile(tile_xyz=(0, 0, 0), target_epsg_code=\"4326\"):\n \"\"\"\n Get the bbox for a tile defined by x,y,z in epsg=target_epsg_code\n\n :param tile_xyz: tuple with tile x,y,z\n :param target_epsg_code: the epsg_code to reproject the bbox to.\n\n :return: The bbox of the tile\n (reprojected to target_epsg_code if necessary)\n Numpy array: [left, top, right, bottom]\n y-axis is inverted: bottom >= top\n\n Examples:\n >>> get_bbox_for_tile((10, 10, 15))\n array([-179.89013672, 85.04069252, -179.87915039, 85.04164217])\n\n >>> get_bbox_for_tile((10, 10, 15), '28992')\n array([ 212572.58551456, 5468921.2451706 , 212705.73721283,\n 5469031.60724307])\n \"\"\"\n if mercantile is None:\n raise_import_exception(\"mercantile\")\n bbox = np.array(\n mercantile.bounds(\n mercantile.Tile(x=int(tile_xyz[0]), y=int(tile_xyz[1]), z=int(tile_xyz[2]))\n )\n )\n\n if MERCANTILE_EPSG_CODE != target_epsg_code:\n bbox = transform_bbox(bbox, MERCANTILE_EPSG_CODE, target_epsg_code)\n\n return bbox\n\n\ndef select_points_by_bbox(points, bbox):\n \"\"\"\n Return a boolean mask array for the points that are in bbox\n\n :param points: np.array [[x1, x2, x3...], [y1, y2, y3...]]\n :param bbox: np.array [left, top, right, bottom], bottom >= top\n :return: a boolean mask array with 'True' for the\n points that are in the bbox\n\n Example:\n >>> select_points_by_bbox(\n ... points=np.array([[1, 10, 1, 10], [10, 10, 50, 50]]),\n ... bbox=np.array([10, 10, 50, 50]))\n array([False, True, False, True], dtype=bool)\n\n >>> select_points_by_bbox(\n ... points=np.array([[-11, 10, 10, -10], [10, 10, -11, -10]]),\n ... bbox=np.array([-10, -10, 10, 10]))\n array([False, True, False, True], dtype=bool)\n\n >>> select_points_by_bbox(\n ... points=np.array([[-11, 10, 10, -10], [10, 10, 0, -10]]),\n ... bbox=np.array([-10, 0, 10, 10]))\n array([False, True, True, False], dtype=bool)\n\n >>> select_points_by_bbox(\n ... points=np.array([[-1, 0, 0, 10], [0, 0, -11, -10]]),\n ... bbox=np.array([0, -10, 10, 0]))\n array([False, True, False, True], dtype=bool)\n\n >>> select_points_by_bbox(\n ... points=np.array([[100, -100], [110, -110]]),\n ... 
bbox=np.array([0, -10, 10, 0]))\n array([False, False], dtype=bool)\n \"\"\"\n lleft = np.array([bbox[BBOX_LEFT], bbox[BBOX_TOP]]) # lower-left\n uright = np.array([bbox[BBOX_RIGHT], bbox[BBOX_BOTTOM]]) # upper-right\n xy_points = points.T\n pre_sel_low = xy_points >= lleft\n pre_sel_up = xy_points <= uright\n return np.all((pre_sel_low & pre_sel_up), axis=1)\n\n\ndef select_points_by_tile(tile_xyz=(0, 0, 0), target_epsg_code=\"4326\", points=None):\n \"\"\"\n Select points by a tile\n\n :param tile_xyz: tuple with tile x,y,z\n :param target_epsg_code: the epsg_code to reproject the bbox to\n before selecting.\n :param points: (np.array) array [[x1, x2, x3...], [y1, y2, y3...]]\n :return: A boolean mask for the points within the tile_xyz bbox.\n\n Example:\n >>> select_points_by_tile((10, 10, 15),\n ... '4326',\n ... np.array([[-179.89013670, -179.89013670],\n ... [ 85.04069252, 85.04269252]]))\n array([ True, False], dtype=bool)\n \"\"\"\n\n assert isinstance(points, np.ndarray)\n\n return select_points_by_bbox(points, get_bbox_for_tile(tile_xyz, target_epsg_code))\n\n\ndef select_lines_by_tile(\n tile_xyz=(0, 0, 0), target_epsg_code=\"4326\", lines=None, include_intersections=False\n):\n \"\"\"\n Select lines by a tile\n\n :param tile_xyz: tuple with tile x,y,z\n :param target_epsg_code: the epsg_code to reproject the bbox to\n before selecting.\n :param lines: (np.array) array:\n x1_array=lines[0], y1_array=lines[1],\n x2_array=lines[2], y2_array=lines[3]\n line[i] = (x1[i], y1[i]) - (x2[i], y2[i])\n\n :return: The bbox of the tile\n (reprojected to target_epsg_code if necessary)\n \"\"\"\n\n assert isinstance(lines, np.ndarray)\n\n return select_lines_by_bbox(\n lines, get_bbox_for_tile(tile_xyz, target_epsg_code), include_intersections\n )\n\n\ndef select_geoms_by_geometry(geoms, geometry):\n \"\"\"Build an STRtree from geoms and returns indices into 'geoms'\n where geometry intersects.\n\n :param geoms: list of geometries you want to search from\n :param geometry: intersection geometry\n :return: ndarray of indices into 'geoms'\n \"\"\"\n if shapely is None:\n raise_import_exception(\"shapely\")\n\n if type(geometry) in (bytes, str):\n if isinstance(geometry, bytes):\n geometry = geometry.decode(\"utf-8\")\n # assume wkt, try to load\n geometry = loads(geometry)\n\n tree = STRtree(geoms)\n # STRtree checks intersection based on bbox of the geometry only:\n # https://github.com/Toblerity/Shapely/issues/558\n\n if shapely.__version__.startswith(\"1.\"):\n result = []\n\n for i, g in enumerate(geoms):\n g.index = i\n\n for intersected_geom in tree.query(geometry):\n if geometry.intersects(intersected_geom):\n result.append(intersected_geom.index)\n return np.array(result, dtype=int)\n else:\n return tree.query(geometry, predicate=\"intersects\")\n", "repo_name": "nens/threedigrid", "sub_path": "threedigrid/geo_utils.py", "file_name": "geo_utils.py", "file_ext": "py", "file_size_in_byte": 10204, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "97", "api": [{"api_name": "osgeo.osr", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "pyproj.Transformer.from_crs", "line_number": 55, "usage_type": "call"}, {"api_name": "pyproj.Transformer", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pyproj.CRS.from_epsg", "line_number": 56, "usage_type": "call"}, {"api_name": "pyproj.CRS", "line_number": 56, "usage_type": "attribute"}, {"api_name": 
"pyproj.CRS.from_epsg", "line_number": 57, "usage_type": "call"}, {"api_name": "pyproj.CRS", "line_number": 57, "usage_type": "attribute"}, {"api_name": "functools.lru_cache", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 88, "usage_type": "name"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 91, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "mercantile.bounds", "line_number": 186, "usage_type": "call"}, {"api_name": "mercantile.Tile", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 258, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 281, "usage_type": "attribute"}, {"api_name": "threedigrid.numpy_utils.select_lines_by_bbox", "line_number": 283, "usage_type": "call"}, {"api_name": "shapely.wkt.loads", "line_number": 303, "usage_type": "call"}, {"api_name": "shapely.strtree.STRtree", "line_number": 305, "usage_type": "call"}, {"api_name": "shapely.__version__.startswith", "line_number": 309, "usage_type": "call"}, {"api_name": "shapely.__version__", "line_number": 309, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 318, "usage_type": "call"}]} +{"seq_id": "36055804802", "text": "from flask import Flask, render_template, request, redirect, url_for\nfrom rf_crossval_severness import get_severeness\n\napp = Flask(__name__)\n\nfrom werkzeug.utils import secure_filename\n\n# Set up an upload folder within your project directory\nUPLOAD_FOLDER = 'static/uploads'\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n# Function to check allowed file extensions\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/result', methods=['POST'])\ndef result():\n    if request.method == 'POST':\n        # Extract the data from the form into the dictionary\n        data = {\n            'erythema': request.form['erythema'],\n            'scaling': request.form['scaling'],\n            'definite_borders': request.form['definite_borders'],\n            'itching': request.form['itching'],\n            'koebner_phenomenon': request.form['koebner_phenomenon'],\n            'polygonal_papules': request.form['polygonal_papules'],\n            'follicular_papules': request.form['follicular_papules'],\n            'oral_mucosal_involvement': request.form['oral_mucosal_involvement'],\n            'knee_and_elbow_involvement': request.form['knee_and_elbow_involvement'],\n            'scalp_involvement': request.form['scalp_involvement'],\n            'family_history': request.form['family_history'],\n            'melanin_incontinence': request.form['melanin_incontinence'],\n            'eosinophils_infiltrate': request.form['eosinophils_infiltrate'],\n            'PNL_infiltrate': request.form['PNL_infiltrate'],\n            'fibrosis_papillary_dermis': request.form['fibrosis_papillary_dermis'],\n            'exocytosis': request.form['exocytosis'],\n            'acanthosis': request.form['acanthosis'],\n            'hyperkeratosis': request.form['hyperkeratosis'],\n            'parakeratosis': request.form['parakeratosis'],\n            'clubbing_rete_ridges': request.form['clubbing_rete_ridges'],\n            'elongation_rete_ridges': request.form['elongation_rete_ridges'],\n            'thinning_suprapapillary_epidermis': request.form['thinning_suprapapillary_epidermis'],\n            'spongiform_pustule': request.form['spongiform_pustule'],\n            'munro_microabcess': request.form['munro_microabcess'],\n            'focal_hypergranulosis': request.form['focal_hypergranulosis'],\n            'disappearance_granular_layer': request.form['disappearance_granular_layer'],\n            'vacuolisation_damage_basal_layer': request.form['vacuolisation_damage_basal_layer'],\n            'spongiosis': request.form['spongiosis'],\n            'saw_tooth_appearance_retes': request.form['saw_tooth_appearance_retes'],\n            'follicular_horn_plug': request.form['follicular_horn_plug'],\n            'perifollicular_parakeratosis': request.form['perifollicular_parakeratosis'],\n            'inflammatory_mononuclear_infiltrate': request.form['inflammatory_mononuclear_infiltrate'],\n            'band_like_infiltrate': request.form['band_like_infiltrate'],\n            'age': request.form['age']\n        }\n        \n        # Convert string inputs to integers\n        for key, value in data.items():\n            data[key] = int(value)\n\n        # Get prediction using the model\n        pred = get_severeness(data)\n\n        return render_template('result.html', prediction=pred[0])\n    return redirect(url_for('index'))\n\nif __name__ == '__main__':\n    app.run(debug=True)", "repo_name": "vishruthb/dermalab", "sub_path": "temp-app.py", "file_name": "temp-app.py", "file_ext": "py", "file_size_in_byte": 3570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, 
"usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, 
"usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "rf_crossval_severness.get_severeness", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "72589052478", "text": "import collections\nimport copy\nimport pathlib\n\nimport warnings\nimport yaml\n\nfrom ..utils import check_exists\n\n\nclass ConfigReader:\n \"\"\"\n Class for parsing input config\n \"\"\"\n @staticmethod\n def merge(arguments):\n \"\"\"\n Args:\n arguments: command-line arguments\n Returns:\n dictionary containing configuration\n \"\"\"\n global_config, local_config = ConfigReader._read_configs(arguments)\n if not local_config:\n raise ValueError('Missing local config')\n\n ConfigReader._check_local_config(local_config)\n ConfigReader._prepare_global_configs(global_config)\n\n config = ConfigReader._merge_configs(global_config, local_config)\n\n ConfigReader._provide_cmd_arguments(arguments, config)\n ConfigReader._merge_paths_with_prefixes(arguments, config)\n ConfigReader._filter_launchers(config, arguments)\n return config\n\n @staticmethod\n def _read_configs(arguments):\n yaml.add_representer(\n collections.OrderedDict,\n lambda dumper, data: dumper.represent_dict(data.iteritems())\n )\n yaml.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n lambda loader, node: collections.OrderedDict(loader.construct_pairs(node))\n )\n\n global_config = None\n if arguments.definitions is not None:\n definitions_path = check_exists(arguments.definitions)\n if not definitions_path.is_absolute():\n definitions_path = arguments.root / definitions_path\n\n if definitions_path.is_file():\n with pathlib.Path(definitions_path).open() as file:\n global_config = yaml.load(file)\n\n config_path = check_exists(arguments.config)\n if not config_path.is_absolute():\n config_path = arguments.root / config_path\n\n with pathlib.Path(config_path).open() as file:\n local_config = yaml.load(file)\n\n return global_config, local_config\n\n @staticmethod\n def 
_check_local_config(config):\n models = config.get('models')\n if not models:\n raise ValueError('Missed \"{}\" in local config'.format('models'))\n\n required_entries = ['name', 'launchers', 'datasets']\n for model in models:\n for entry in required_entries:\n config_entry = model.get(entry)\n if config_entry:\n continue\n\n raise ValueError('Each model must specify {}'.format(required_entries))\n\n for dataset in model['datasets']:\n required = ['name']\n\n missed = [entry for entry in required if entry not in dataset]\n if not missed:\n continue\n\n message = 'Model \"{}\" must specify \"{}\" for each {}'.format(model['name'], required, 'dataset')\n raise ValueError(message)\n\n @staticmethod\n def _prepare_global_configs(global_configs):\n if not global_configs or 'datasets' not in global_configs:\n return\n datasets = global_configs['datasets']\n\n def merge(local_entries, global_entries, identifier):\n if not local_entries or not global_entries:\n return\n\n for i, local in enumerate(local_entries):\n local_identifier = local.get(identifier)\n if not local_identifier:\n continue\n\n local_entries[i] = ConfigReader._merge_configs_by_identifier(global_entries, local, identifier)\n\n for dataset in datasets:\n merge(dataset.get('preprocessing'), global_configs.get('preprocessing'), 'type')\n merge(dataset.get('metrics'), global_configs.get('metrics'), 'type')\n merge(dataset.get('postprocessing'), global_configs.get('postprocessing'), 'type')\n\n @staticmethod\n def _merge_configs(global_configs, local_config):\n config = copy.deepcopy(local_config)\n if not global_configs:\n return config\n\n models = config.get('models')\n for model in models:\n for i, launcher_entry in enumerate(model['launchers']):\n model['launchers'][i] = ConfigReader._merge_configs_by_identifier(\n global_configs['launchers'], launcher_entry, 'framework'\n )\n\n for i, dataset in enumerate(model['datasets']):\n model['datasets'][i] = ConfigReader._merge_configs_by_identifier(\n global_configs['datasets'], dataset, 'name'\n )\n\n return config\n\n @staticmethod\n def _merge_configs_by_identifier(global_config, local_config, identifier):\n local_identifier = local_config.get(identifier)\n if not local_identifier:\n return local_config\n\n matched = []\n for config in global_config:\n global_identifier = config.get(identifier)\n if not global_identifier:\n continue\n\n if global_identifier != local_identifier:\n continue\n\n matched.append(config)\n\n fallback = matched[0] if matched else {}\n\n config = copy.deepcopy(fallback)\n for key, value in local_config.items():\n config[key] = value\n\n return config\n\n @staticmethod\n def _merge_paths_with_prefixes(arguments, config):\n args = arguments if isinstance(arguments, dict) else vars(arguments)\n entries_paths = {\n 'launchers': {\n 'model': 'models',\n 'weights': 'models',\n 'caffe_model': 'models',\n 'caffe_weights': 'models',\n 'tf_model': 'models',\n 'mxnet_weights': 'models',\n 'onnx_model': 'models',\n 'kaldi_model': 'models',\n 'cpu_extensions': 'extensions',\n 'gpu_extensions': 'extensions',\n 'bitstream': 'bitstreams'\n },\n 'datasets': {\n 'data_source': 'source',\n 'segmentation_masks_source': 'source',\n 'annotation': 'annotations',\n 'dataset_meta': 'annotations'\n }\n }\n\n def merge_entry_paths(keys, value):\n for field, argument in keys.items():\n if field not in value:\n continue\n\n config_path = pathlib.Path(value[field])\n if config_path.is_absolute():\n continue\n\n prefix = args[argument]\n if not prefix.is_absolute():\n prefix = 
args['root'] / prefix\n\n value[field] = (prefix / config_path).as_posix()\n\n for model in config['models']:\n for entry, command_line_arg in entries_paths.items():\n if entry not in model:\n continue\n\n for config_entry in model[entry]:\n merge_entry_paths(command_line_arg, config_entry)\n\n @staticmethod\n def _provide_cmd_arguments(arguments, config):\n for model in config['models']:\n for launcher_entry in model['launchers']:\n if launcher_entry['framework'].lower() != 'dlsdk':\n continue\n\n converted_models = arguments.converted_models if arguments.converted_models else arguments.models\n\n launcher_entry['_converted_models'] = converted_models\n launcher_entry['_models_prefix'] = arguments.models\n launcher_entry['_model_optimizer'] = arguments.model_optimizer\n\n @staticmethod\n def _filter_launchers(config, arguments):\n def filtered(launcher):\n config_framework = launcher['framework']\n target_framework = args.get('target_framework')\n if target_framework and target_framework.lower() != config_framework.lower():\n return True\n\n config_device = launcher.get('device')\n target_device = args.get('target_device')\n if target_device and config_device and target_device.lower() != config_device.lower():\n return True\n\n return False\n\n args = arguments if isinstance(arguments, dict) else vars(arguments)\n for model in config['models']:\n launchers = model['launchers']\n launchers = [launcher for launcher in launchers if not filtered(launcher)]\n\n if not launchers:\n warnings.warn('Model \"{}\" has no launchers'.format(model['name']))\n\n model['launchers'] = launchers\n", "repo_name": "cogniteam/intel-pengo", "sub_path": "docker/openvino/open_model_zoo/demos/python_demos/accuracy_checker/accuracy_checker/config/config_reader.py", "file_name": "config_reader.py", "file_ext": "py", "file_size_in_byte": 8625, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "97", "api": [{"api_name": "yaml.add_representer", "line_number": 39, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 40, "usage_type": "attribute"}, {"api_name": "yaml.add_constructor", "line_number": 43, "usage_type": "call"}, {"api_name": "yaml.resolver", "line_number": 44, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.check_exists", "line_number": 50, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 55, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.check_exists", "line_number": 58, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 62, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 116, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 153, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 189, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 241, "usage_type": "call"}]} +{"seq_id": "9104321039", "text": "\nimport bconfig, util\nimport copy\nfrom datetime import datetime, timedelta\nimport pytz, traceback\nimport importlib\nimport threading\nimport re\n\nSQL_TEMPLATE = 'select {fields} from {table} where time >= convert_tz(\"{start}\", \"{timezone}\", \"Etc/UTC\") and time < convert_tz(\"{end}\", \"{timezone}\", \"Etc/UTC\") {conditions} {groupby} {orderby} {sort} {limit}'\n\nHIVE_TEMPLATE = 'select {fields} from {table} 
where {partition} >= \"{start}\" and {partition} < \"{end}\" {conditions} {groupby} {orderby} {sort} {limit}'\n\nSQL_JOIN_TEMPLATE = '''\n\t\t\t\t\tselect {fields} from \n\t\t\t\t\t\t(select {raw_fields_t1} from {table1} \n\t\t\t\t\t\twhere time >= convert_tz(\"{start}\", \"{timezone}\", \"Etc/UTC\") \n\t\t\t\t\t\t\tand time < convert_tz(\"{end}\", \"{timezone}\", \"Etc/UTC\") \n\t\t\t\t\t\t\t{conditions_t1} \n\t\t\t\t\t\t{groupby_t1}) {table1}\n\t\t\t\t\t\t\n\t\t\t\t\t\tjoin\n\t\t\t\t\t\t\n\t\t\t\t\t\t(select {raw_fields_t2} from {table2} \n\t\t\t\t\t\twhere\n\t\t\t\t\t\t\t1 = 1\n\t\t\t\t\t\t\t{conditions_t2} \n\t\t\t\t\t\t{groupby_t2}) {table2}\n\t\t\t\t\t\t\n\t\t\t\t\t\ton {table1}.{join_field_t1} = {table2}.{join_field_t2}\n\t\t\t\t\twhere\n\t\t\t\t\t\t1 = 1\n\t\t\t\t\t\t{conditions}\n\t\t\t\t\t{groupby}\n\t\t\t\t\t{orderby} {sort}\n\t\t\t\t\t{limit}\n\t\t\t\t\t'''\n\n\nclass api(object):\n\t\n\tdef execute(self, appconfig, query, args):\n\t\turl = query['url'].format(**args)\n\t\tyca = query.get('yca')\n\t\treturn util.fetch_api_data(appconfig, url, yca)\n\nclass sql(object):\n\n\tdef execute(self, appconfig, query, args):\n\t\tret = {}\n\n\t\tstart, end, timezone = util.parseTime(args)\n\n\t\tself.parallel = util.get(query, 'parallel', 'false')\n\t\tself.threads = []\n\t\tquery['timezone'] = timezone\n\t\tquery['conditions'] = self.get_additional_conditions(args, query)\n\n\t\toffset_minutes = util.get(query, 'offset_minutes', 0)\n\t\t\n\t\tself.fetch_data(appconfig, query, args, start, end, util.total_seconds(timedelta(minutes=offset_minutes)), ret, 'today')\n\n\t\tif 'true' == query.get('dod'):\n\t\t\tself.fetch_data(appconfig, query, args, start, end, util.total_seconds(timedelta(days=1, minutes=offset_minutes)), ret, 'ystd')\n\t\t\n\t\tif 'true' == query.get('wow'):\n\t\t\tself.fetch_data(appconfig, query, args, start, end, util.total_seconds(timedelta(days=7, minutes=offset_minutes)), ret, 'lastwk')\n\n\t\tif self.parallel and len(self.threads) > 0:\n\t\t\tfor t in self.threads:\n\t\t\t\tt.join()\n\n\t\treturn ret\n\n\tdef get_additional_conditions(self, args, query):\n\t\tconditions = []\n\t\ttry:\n\t\t\tfilters = query['filters'].split(',')\n\t\t\tfor filter in filters:\n\t\t\t\tif filter not in args: continue\n\t\t\t\tconditions.append(\"%s in (%s)\" % (filter, args[filter]))\n\t\texcept:\n\t\t\tprint('[WARN] skip filters.')\n\t\t\tpass\n\n\t\tif 'fixed_condition' in query:\n\t\t\tconditions.append(query['fixed_condition'])\n\n\t\treturn \"\".join(map(\"\".join, zip([\" and \"]*len(conditions), conditions)))\n\t\n\tdef fetch_data(self, appconfig, query, args, start, end, offset, data, _type):\n\t\tif self.parallel == 'true':\n\t\t\tt = threading.Thread(target=self.fetch_data0, args = (appconfig, query, args, start, end, offset, data, _type))\n\t\t\tt.start()\n\t\t\tself.threads.append(t)\n\t\telse:\n\t\t\tself.fetch_data0(appconfig, query, args, start, end, offset, data, _type)\n\n\tdef fetch_data0(self, appconfig, query, args, start, end, offset, data, _type):\n\n\t\tquery = copy.deepcopy(query)\n\n\t\tdateformat = util.get(query, 'dateformat', util.DATE_ISO)\n\t\tquery['dateformat'] = util.convert_date_format(dateformat)\n\t\t\t\n\t\t# timezone_switch_aware_offset is used to make time align in graph\n\t\tadjusted_offset = util.timezone_switch_aware_offset(start, offset, query['timezone'])\n\t\tgroupby = query.get('groupby')\n\t\tfields = query['fields'].split(',')\n\t\tif groupby:\n\t\t\tquery['fields'] = \",\".join(map(lambda f: 'sum(%s) as %s' % (f,f) if(f not in groupby 
and not re.match(\" as \", f, re.IGNORECASE)) else f, fields))\n\t\t\tquery['groupby'] = \"group by \" + groupby\n\t\telse:\n\t\t\tquery['groupby'] = \"\"\n\n\t\tquery['orderby'] = util.get(query, 'orderby', '', lambda x: 'order by ' + x)\n\t\tquery['sort'] = util.get(query, 'sort', '')\n\t\tquery['limit'] = util.get(query, 'limit', '', lambda x: 'limit ' + x)\n\n\t\t# support parameters in conditions\n\t\tquery['conditions'] = query['conditions'].format(**query)\n\n\t\tquery['partition'] = util.get(query, 'partition', 'ds')\n\t\t\n\t\t# support parameters to join 2 tables\n\t\tjoin = util.get(query, 'join', None)\n\t\tjoin_to = None\n\t\tif join:\n\t\t\tjoin_to = util.get(join, 'join_to', None)\n\t\t\tif join_to:\n\t\t\t\tquery['table1'] = util.get(query, 'table', None)\n\t\t\t\tquery['groupby_t1'] = \"group by \" + util.get(join, 'groupby', '')\n\t\t\t\tquery['conditions_t1'] = self.get_additional_conditions(args, join)\n\t\t\t\tquery['raw_fields_t1'] = \",\".join(map(lambda f: 'sum(%s) as %s' % (f,f) if(f not in query['groupby_t1'] and not re.match(\" as \", f, re.IGNORECASE)) else f, join['raw_fields'].split(',')))\n\t\t\t\tquery['join_field_t1'] = util.get(join, 'on', None)\n\t\t\t\t\n\t\t\t\tquery['table2'] = util.get(join_to, 'table', None)\n\t\t\t\tquery['groupby_t2'] = \"group by \" + util.get(join_to, 'groupby', '')\n\t\t\t\tquery['conditions_t2'] = self.get_additional_conditions(args, join_to)\n\t\t\t\tquery['raw_fields_t2'] = \",\".join(map(lambda f: 'sum(%s) as %s' % (f,f) if(f not in query['groupby_t2'] and not re.match(\" as \", f, re.IGNORECASE)) else f, join_to['raw_fields'].split(',')))\n\t\t\t\tquery['join_field_t2'] = util.get(join_to, 'on', None)\n\t\t\t\n\t\t\t\n\t\toffset_w_timezone = adjusted_offset + util.getOffsetSeconds(start, query['timezone'])\n\t\tif util.get(query, 'source', None) == 'hive':\n\t\t\tquery['start'] = util.time_to_utc(start - offset, dateformat, query['timezone'])\n\t\t\tquery['end'] = util.time_to_utc(end - offset, dateformat, query['timezone'])\n\t\t\tsql = HIVE_TEMPLATE.format(**query)\n\t\t\tres = util.fetch_hive_data(appconfig, fields, sql, offset_w_timezone)\n\t\telse:\n\t\t\t#query['start'] & query['end'] need to be in timezone-free format, \n\t\t\t#because SQL_TEMPLATE will convert it to timezone-specific time\n\t\t\tquery['start'] = util.time2str(start - offset, dateformat)\n\t\t\tquery['end'] = util.time2str(end - offset, dateformat)\n\t\t\tif join and join_to:\n\t\t\t\tsql = SQL_JOIN_TEMPLATE.format(**query)\n\t\t\telse:\n\t\t\t\tsql = SQL_TEMPLATE.format(**query)\n\t\t\tres = util.fetch_sql_data(appconfig, fields, sql, offset_w_timezone)\n\n\t\tdata[_type] = res\n\n\ndef _execute(appconfig, query, args, fetcher):\n\treturn eval(fetcher)().execute(appconfig, query, args)\n\ndef execute(appconfig, page, args):\n\tnavigation = bconfig.load_config(page)\n\tif navigation['id'] == 'brick':\n\t\treturn {'error': 'no action for %s' % (page)}\n\n\ttabs = navigation['tabs']\n\tret = {}\n\t\n\ttry:\n\t\tmodule = importlib.import_module(page)\n\texcept Exception as e:\n\t\tprint(e)\n\t\tprint('[INFO] no module %s function found' % (page))\n\t\tmodule = None\n\n\ttry:\n\t\tfor tab in tabs:\n\t\t\ttabid = util.get(args, 'tabid', None)\n\t\t\tif tabid and tabid != tab['id']: continue\n\n\t\t\tfor fetcher in ['sql', 'api', 'hive']:\n\t\t\t\tif fetcher not in tab: continue\n\n\t\t\t\tquery = copy.deepcopy(tab[fetcher])\n\n\t\t\t\tif module and hasattr(module, 'preexecute'):\n\t\t\t\t\tquery, args = module.preexecute(appconfig, query, 
args)\n\n\t\t\t\tif module and hasattr(module, 'execute'):\n\t\t\t\t\tdata = module.execute(appconfig, query, args, fetcher)\n\t\t\t\telse:\n\t\t\t\t\tdata = _execute(appconfig, query, args, fetcher)\n\n\t\t\t\tif module and hasattr(module, 'postexecute'):\n\t\t\t\t\tdata = module.postexecute(appconfig, query, args, data)\n\n\t\t\tret[tabid] = data\n\t\t\tbreak\n\t\t\t\t\n\texcept Exception as e:\n\t\tprint(\"[ERROR]\", e)\n\t\ttraceback.print_exc()\n\n\treturn ret\n", "repo_name": "zezhen/brick", "sub_path": "modules/brick.py", "file_name": "brick.py", "file_ext": "py", "file_size_in_byte": 7233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "util.fetch_api_data", "line_number": 45, "usage_type": "call"}, {"api_name": "util.parseTime", "line_number": 52, "usage_type": "call"}, {"api_name": "util.get", "line_number": 54, "usage_type": "call"}, {"api_name": "util.get", "line_number": 59, "usage_type": "call"}, {"api_name": "util.total_seconds", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 61, "usage_type": "call"}, {"api_name": "util.total_seconds", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 64, "usage_type": "call"}, {"api_name": "util.total_seconds", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 67, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 93, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 101, "usage_type": "call"}, {"api_name": "util.get", "line_number": 103, "usage_type": "call"}, {"api_name": "util.DATE_ISO", "line_number": 103, "usage_type": "attribute"}, {"api_name": "util.convert_date_format", "line_number": 104, "usage_type": "call"}, {"api_name": "util.timezone_switch_aware_offset", "line_number": 107, "usage_type": "call"}, {"api_name": "re.search", "line_number": 111, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 111, "usage_type": "attribute"}, {"api_name": "util.get", "line_number": 116, "usage_type": "call"}, {"api_name": "util.get", "line_number": 117, "usage_type": "call"}, {"api_name": "util.get", "line_number": 118, "usage_type": "call"}, {"api_name": "util.get", "line_number": 123, "usage_type": "call"}, {"api_name": "util.get", "line_number": 126, "usage_type": "call"}, {"api_name": "util.get", "line_number": 129, "usage_type": "call"}, {"api_name": "util.get", "line_number": 131, "usage_type": "call"}, {"api_name": "util.get", "line_number": 132, "usage_type": "call"}, {"api_name": "re.search", "line_number": 134, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 134, "usage_type": "attribute"}, {"api_name": "util.get", "line_number": 135, "usage_type": "call"}, {"api_name": "util.get", "line_number": 137, "usage_type": "call"}, {"api_name": "util.get", "line_number": 138, "usage_type": "call"}, {"api_name": "re.search", "line_number": 140, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 140, "usage_type": "attribute"}, {"api_name": "util.get", "line_number": 141, "usage_type": "call"}, {"api_name": "util.getOffsetSeconds", "line_number": 144, "usage_type": "call"}, {"api_name": "util.get", "line_number": 145, "usage_type": "call"}, {"api_name": "util.time_to_utc", "line_number": 146, "usage_type": "call"}, {"api_name": "util.time_to_utc", "line_number": 147, "usage_type": "call"}, {"api_name": "util.fetch_hive_data", "line_number": 149, 
"usage_type": "call"}, {"api_name": "util.time2str", "line_number": 153, "usage_type": "call"}, {"api_name": "util.time2str", "line_number": 154, "usage_type": "call"}, {"api_name": "util.fetch_sql_data", "line_number": 159, "usage_type": "call"}, {"api_name": "bconfig.load_config", "line_number": 168, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 176, "usage_type": "call"}, {"api_name": "util.get", "line_number": 184, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 190, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "26468951463", "text": "# Necessary to run:\n# pip install evdev\n# pip install RPi.GPIO\n\nimport sys\nimport evdev\nimport RPi.GPIO as GPIO\nfrom time import sleep\nimport readchar\n\n# Configure the GPIO pin for the diode\nDIODE_PIN = 4\nSPEAKER_PIN = 17\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(DIODE_PIN, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(SPEAKER_PIN, GPIO.OUT) # Replace 17 with the GPIO pin connected to the key\n\n# Notes for sound\nnote_G = 391.00\nnote_A = 440.00\nnote_B = 493.88\nnote_C = 523.25\nnote_D = 587.33\nnote_E = 659.25\nnote_Fs = 739.99\nnote_Go = 790.00\n\n\ndef play_tone(frequency, duration):\n pwm = GPIO.PWM(SPEAKER_PIN, frequency)\n pwm.start(50) # 50% duty cycle\n sleep(duration)\n pwm.stop()\n\ndef play_arpeggio(duration): # Plays C E G C G E C\n q = duration/6\n breath = .001\n play_tone(note_G, q*.75)\n sleep(breath)\n play_tone(note_C, q*.25)\n sleep(breath)\n play_tone(note_C, q*1.5)\n sleep(q*.25)\n play_tone(note_C, q*.25)\n sleep(breath)\n play_tone(note_D, q*.33)\n sleep(breath)\n play_tone(note_C, q*.33)\n sleep(breath)\n play_tone(note_D, q*.33)\n sleep(breath)\n play_tone(note_E, q*.75)\n sleep(breath)\n play_tone(note_Go, q*.25)\n sleep(breath)\n play_tone(note_Go, q)\n\ndef play_low_c(duration):\n play_tone(262.00, duration/3)\n play_tone(130.81, duration/3)\n play_tone(130.81, duration/3)\n\n# Define the keyboard input code to trigger the diode\nTARGET_CODE = \"123425\"\n\n# Function to search for an item in a list\ndef search_item(item, my_list):\n for i in my_list:\n print(i)\n if isinstance(i, tuple) and i[0].startswith(item):\n return i\n return -1\n\n# Function to handle keyboard4 events\ndef handle_keyboard_events():\n # Find the USB keyboard device\n devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]\n keyboard = None\n for device in devices:\n index = search_item('KEY_KP1', device.capabilities(verbose=True)[('EV_KEY',1)])\n if index != -1:\n keyboard = device\n break\n\n if not keyboard:\n print(\"USB keyboard not found.\")\n sys.exit(1)\n\n print(\"Waiting for input...\")\n input_buffer = \"\"\n while(True):\n print('Reading a key:')\n key_value = readchar.readkey()\n print(key_value + \" entered.\")\n if key_value == \"ENTER\":\n # Check if the input code matches the target code\n if input_buffer != TARGET_CODE:\n # Turn off the diode and clear the input buffer\n GPIO.output(DIODE_PIN, GPIO.LOW)\n play_low_c(1)\n input_buffer = \"\"\n print(\"Code cleared. 
Diode turned off.\")\n else:\n # Clear the input buffer without turning off the diode\n GPIO.output(DIODE_PIN, GPIO.HIGH)\n play_arpeggio(3)\n input_buffer = \"\"\n print(\"Success!\")\n sleep(1)\n GPIO.output(DIODE_PIN, GPIO.LOW)\n\n else:\n # Append the pressed key to the input buffer\n input_buffer += key_value\n print(\"Buffer: \" + input_buffer)\n\n\n\n\n# Run the program\ntry:\n handle_keyboard_events()\nexcept KeyboardInterrupt:\n # Clean up the GPIO pin on program exit\n GPIO.output(DIODE_PIN, GPIO.LOW)\n GPIO.cleanup()\n\n\n", "repo_name": "alexalexalex09/escape", "sub_path": "symbols/keyboardCode.py", "file_name": "keyboardCode.py", "file_ext": "py", "file_size_in_byte": 3310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 14, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 14, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 14, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 15, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.LOW", "line_number": 15, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 16, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 16, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PWM", "line_number": 30, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 30, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "evdev.InputDevice", "line_number": 77, "usage_type": "call"}, {"api_name": "evdev.list_devices", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 87, "usage_type": "call"}, {"api_name": "readchar.readkey", "line_number": 93, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 99, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 99, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 99, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 105, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 105, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 105, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 109, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 110, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 110, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 110, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 125, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 125, "usage_type": "name"}, {"api_name": 
"RPi.GPIO.LOW", "line_number": 125, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 126, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "9785867500", "text": "from airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.operators.bash import BashOperator\nfrom src.preprocessing.data_preprocessing import data_preprocessing\nfrom datetime import datetime, timedelta\n\n\ndef load_data():\n with open('params.yaml') as file:\n try:\n params = yaml.safe_load(file)['preprocessing']\n except yaml.YAMLError as exception:\n print(exception)\n\n data_preprocessing(\n dataset_csv_path='data/test.csv',\n save_path='data/preprocessed/train_dataset.csv',\n train=False,\n dataset_for_lag_features_csv_path='data/sales_train.csv',\n date_block_num=34,\n description_csv_path='data/'\n )\n\ndefault_args = {\n 'owner': 'coder2j',\n 'retries': 5,\n 'retry_delay': timedelta(minutes=5)\n}\n\nwith DAG(\n #default_args=default_args,\n dag_id='our_dag_with_python_operator_v07',\n description='Dag for test dataset prediction.',\n start_date=datetime(2021, 10, 6),\n schedule_interval='@daily'\n) as dag:\n task1 = PythonOperator(\n task_id='get_name',\n python_callable=load_data\n )\n", "repo_name": "DanceHallDay/mlops", "sub_path": "dags/prediction_dag.py", "file_name": "prediction_dag.py", "file_ext": "py", "file_size_in_byte": 1135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "src.preprocessing.data_preprocessing.data_preprocessing", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 27, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "call"}, {"api_name": "airflow.operators.python.PythonOperator", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "12698922244", "text": "\nfrom django.urls import path, re_path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"profile/<int:user_id>\", views.profile, name=\"profile\"),\n path(\"following\", views.following, name=\"following\"),\n # API routes\n path(\"follow/\", views.follow),\n path(\"edit/<int:post_id>\", views.edit),\n path(\"<int:user_id>/edit/<int:post_id>\", views.edit_from_profile),\n path(\"like/<int:post_id>\", views.like),\n path(\"toggle_visibility/<int:post_id>\", views.toggle_visibility),\n \n]\n", "repo_name": "DevelopWithAP/network", "sub_path": "network/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 690, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "32417192530", "text": "import numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n# Database Setup\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Flask Setup\napp = Flask(__name__)\n\n# Flask Routes\n@app.route(\"/\")\ndef welcome():\n print(\"Server received request for 'welcome' page...\")\n \n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/*start: A date string in the format %Y-%m-%d<br/>\"\n f\"/api/v1.0/*start: A date string in the format %Y-%m-%d/*end: A date string in the format %Y-%m-%d<br/>\"\n f\"*Be sure to replace start and end dates with valid dates from 2010-01-01 to 2017-08-23\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n print(\"Server received request for 'precipitation' page...\")\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Return a list of dates and precipitation\"\"\"\n # Query Measurement for last year\n year_precip = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= \"2016-08-23\").all()\n \n session.close()\n\n # Create a dictionary using date as the key and prcp as the value\n all_precip = []\n for date, prcp in year_precip:\n precip_dict = {}\n precip_dict[date] = prcp\n all_precip.append(precip_dict)\n\n 
#Return the JSON representation of dictionary\n return jsonify(all_precip)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n print(\"Server received request for 'stations' page...\")\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Return a list of all stations\"\"\"\n # Query all stations\n stations = session.query(Measurement.station).distinct().all()\n\n session.close()\n\n # Convert list of tuples into normal list\n all_stations = list(np.ravel(stations))\n\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n print(\"Server received request for 'tobs' page...\")\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Return a list of temperatures for most active station\"\"\"\n # Query the dates and temperature observations of the most active station for the last year of data\n station_temp = session.query(Measurement.tobs).filter(Measurement.date >= \"2016-08-23\").\\\n filter(Measurement.station == \"USC00519281\").all()\n\n session.close()\n\n # Convert list of tuples into normal list\n station_temps = list(np.ravel(station_temp))\n\n return jsonify(station_temps)\n\n@app.route(\"/api/v1.0/<start>\")\ndef calc_temps(start):\n print(\"Server received request for 'start' page...\")\n \n session = Session(engine)\n\n temp_args = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).all()\n \n session.close()\n\n temp_list = list(np.ravel(temp_args))\n return jsonify(temp_list)\n\n@app.route(\"/api/v1.0/<start>/<end>\")\ndef calc_temps2(start, end):\n print(\"Server received request for 'start/end' page...\")\n \n session = Session(engine)\n \n temp_args2 = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n session.close()\n\n temp_list2 = list(np.ravel(temp_args2))\n return jsonify(temp_list2)\n\nif __name__ == '__main__':\n app.run(debug=True) ", "repo_name": "trackrun82/sqlalchemy-challenge", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.automap.automap_base", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 80, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 99, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 105, "usage_type": "call"}, {"api_name": "sqlalchemy.func.min", "line_number": 107, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 107, "usage_type": "name"}, {"api_name": "sqlalchemy.func.avg", "line_number": 107, "usage_type": "call"}, {"api_name": 
"sqlalchemy.func.max", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 113, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 119, "usage_type": "call"}, {"api_name": "sqlalchemy.func.min", "line_number": 121, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 121, "usage_type": "name"}, {"api_name": "sqlalchemy.func.avg", "line_number": 121, "usage_type": "call"}, {"api_name": "sqlalchemy.func.max", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "7306647699", "text": "from bs4 import BeautifulSoup\r\nimport requests\r\n\r\n'''go to booking.com, put desired location,occupancy quantity, and dates. Then press enter and \r\npaste those lnks here '''\r\n\r\n\r\n# Paste the URL here\r\nurl = \"https://www.booking.com/searchresults.en-gb.html?ss=Mendoza&ssne=Mendoza&ssne_untouched=Mendoza&label=gen173nr-1FCAEoggI46AdIM1gEaAyIAQGYAQm4ARfIAQzYAQHoAQH4AQuIAgGoAgO4Au3_0p8GwAIB0gIkNDA3M2Q2ZjMtZjZjOC00ZWNjLWFmNjQtZWFlZDlmYjVjMTc22AIG4AIB&sid=d8b713e5b1e956005a30a6976b1dc703&aid=304142&lang=en-gb&sb=1&src_elem=sb&src=searchresults&dest_id=-1003869&dest_type=city&checkin=2023-02-21&checkout=2023-02-22&group_adults=2&no_rooms=1&group_children=0&sb_travel_purpose=leisure\"\r\n\r\n# Fetch the page\r\npage = requests.get(url)\r\n\r\n# Initialize html parser\r\nsoup = BeautifulSoup(page.content, \"html.parser\")\r\n\r\n# Find all property cards\r\nproperty_cards = soup.find_all(\"div\", class_=\"a826ba81c4 fe821aea6c fa2f36ad22 afd256fc79 d08f526e0d ed11e24d01 ef9845d4b3 da89aeb942\")\r\n\r\n# Print title of each property\r\nfor card in property_cards:\r\n title_element = card.find('div', class_='fcab3ed991 a23c043802')\r\n title = title_element.get_text()\r\n price_element = card.find('span', class_=\"fcab3ed991 fbd1d3018c e729ed5ab6\")\r\n price = price_element.get_text()\r\n occupancy_element = card.find('div', class_=\"d8eab2cf7f c90c0a70d3\")\r\n occupancy = occupancy_element.get_text()\r\n info = [title, price, occupancy]\r\n print(info)\r\n", "repo_name": "ortizfram/Booking-Price-Scraper", "sub_path": "script.py", "file_name": "script.py", "file_ext": "py", "file_size_in_byte": 1442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "23518992690", "text": "import streamlit as st\nimport preprocessing\n\nimport pickle\nfrom PIL import Image\nimage = Image.open('pic.png')\n\nnmf= pickle.load(open('nmf.pkl', 'rb'))\nvectorizer= pickle.load(open('tfidf.vec', 'rb'))\n\n\n# Add a title and intro text\nst.title('Topics extractor')\nst.text('This is a web app that allows Topics extraction from bad restaurant reviews')\n\n\n\n\nwith st.form(\"my_form\"):\n st.image(image)\n\n number = st.number_input(\"topics number\", value=1, min_value=1, max_value=15)\n text=st.text_input(\"Your review\", placeholder=\"At least 4 characters\")\n # Every form must have a submit button.\n submitted = st.form_submit_button(\"Extract topics\")\n if submitted:\n if len(text) < 5:\n st.error(\"Invalid text: At least 4 characters\")\n else:\n pre=preprocessing.predict_topics(nmf, 
vectorizer,number,text)\n if not pre[1]:\n st.info(\"Polarity score: \"+ str(pre[0]))\n else:\n st.info(\"Polarity score: \"+ str(pre[0]))\n st.info(\"Topics: \"+ str(pre[1]))\n\n", "repo_name": "koussayb6/NLP_Jalon3", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 9, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 13, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.form", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.form_submit_button", "line_number": 25, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 28, "usage_type": "call"}, {"api_name": "preprocessing.predict_topics", "line_number": 30, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 32, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "72650609909", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n## Module infomation ###\n# Python (3.4.4)\n# numpy (1.10.2)\n# PyAudio (0.2.9)\n# matplotlib (1.5.1)\n# All 32bit edition\n########################\n\nimport numpy as np\nimport pyaudio\n\nimport matplotlib.pyplot as plt\n\nclass SpectrumAnalyzer:\n FORMAT = pyaudio.paFloat32\n CHANNELS = 1\n RATE = 16000\n CHUNK = 512\n START = 0\n N = 512\n\n wave_x = 0\n wave_y = 0\n spec_x = 0\n spec_y = 0\n data = []\n\n def __init__(self):\n self.pa = pyaudio.PyAudio()\n self.stream = self.pa.open(format = self.FORMAT,\n channels = self.CHANNELS, \n rate = self.RATE, \n input = True,\n output = False,\n frames_per_buffer = self.CHUNK)\n # Main loop\n self.loop()\n\n def loop(self):\n try:\n while True :\n self.data = self.audioinput()\n self.fft()\n self.graphplot()\n\n except KeyboardInterrupt:\n self.pa.close()\n\n print(\"End...\")\n\n def audioinput(self):\n ret = self.stream.read(self.CHUNK)\n ret = np.fromstring(ret, np.float32)\n return ret\n\n def fft(self):\n self.wave_x = range(self.START, self.START + self.N)\n self.wave_y = self.data[self.START:self.START + self.N]\n self.spec_x = np.fft.fftfreq(self.N, d = 1.0 / self.RATE) \n y = np.fft.fft(self.data[self.START:self.START + self.N]) \n self.spec_y = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in y]\n\n def graphplot(self):\n plt.clf()\n # wave\n plt.subplot(311)\n plt.plot(self.wave_x, self.wave_y)\n plt.axis([self.START, self.START + self.N, -0.5, 0.5])\n plt.xlabel(\"time [sample]\")\n plt.ylabel(\"amplitude\")\n #Spectrum\n plt.subplot(312)\n plt.plot(self.spec_x, self.spec_y, marker= 'o', linestyle='-')\n plt.axis([0, self.RATE / 2, 0, 50])\n plt.xlabel(\"frequency [Hz]\")\n plt.ylabel(\"amplitude spectrum\")\n #Pause\n plt.pause(.01)\n\nif __name__ == \"__main__\":\n spec = SpectrumAnalyzer()\n ", "repo_name": "gistable/gistable", "sub_path": 
"all-gists/950bbb9d9d4014d893e5/snippet.py", "file_name": "snippet.py", "file_ext": "py", "file_size_in_byte": 2119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 74, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pyaudio.paFloat32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pyaudio.PyAudio", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftfreq", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "43320002042", "text": "from django.urls import path\nfrom .views import TrainingView, ReadTrainingResult, TaskStatusView, ProfileListView, ProfileDeleteView\nfrom .views import get_create_profile_view, get_update_profile_view\n\napp_name = \"base\"\nurlpatterns = [\n\tpath('', TrainingView.as_view(), name=\"training\"),\n\tpath('readResult/<task_id>', ReadTrainingResult.as_view(), name=\"readResult\"),\n\tpath('getTaskStatus/<task_id>', TaskStatusView.as_view(), name=\"getTaskStatus\"),\n\tpath('profile/', ProfileListView.as_view(), name=\"list-profile\"),\n\tpath('profile/create/<profile_name>', get_create_profile_view, name=\"create-profile\"),\n\tpath('profile/edit/<profile_name>/<int:pk>', get_update_profile_view, 
name=\"edit-profile\"),\n\tpath('profile/delete/<int:pk>', ProfileDeleteView.as_view(), name=\"delete-profile\"),\n]\n", "repo_name": "chschenk/SVP2", "sub_path": "base/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.TrainingView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.TrainingView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ReadTrainingResult.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ReadTrainingResult", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.TaskStatusView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.TaskStatusView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.ProfileListView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.ProfileListView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.get_create_profile_view", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.get_update_profile_view", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.ProfileDeleteView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "views.ProfileDeleteView", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "7769072146", "text": "\"\"\"datefield changed\n\nRevision ID: 9e1e851bc44f\nRevises: 50b73fd13529\nCreate Date: 2018-01-23 17:32:03.310516\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '9e1e851bc44f'\ndown_revision = '50b73fd13529'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n #op.create_index(op.f('ix_AdminUser_username'), 'AdminUser', ['username'], unique=True)\n #op.drop_index('ix_AdminUser_username', table_name='AdminUser')\n op.add_column('posts', sa.Column('change_date', sa.DateTime(), nullable=True))\n op.add_column('posts', sa.Column('create_date', sa.DateTime(), nullable=True))\n op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=True)\n op.drop_column('posts', 'date')\n op.create_index(op.f('ix_tags_name'), 'tags', ['name'], unique=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_tags_name'), table_name='tags')\n op.add_column('posts', sa.Column('date', mysql.DATETIME(), nullable=True))\n op.drop_index(op.f('ix_posts_title'), table_name='posts')\n op.drop_column('posts', 'create_date')\n op.drop_column('posts', 'change_date')\n op.create_index('ix_AdminUser_username', 'AdminUser', ['username'], unique=True)\n op.drop_index(op.f('ix_AdminUser_username'), table_name='AdminUser')\n # ### end Alembic commands ###\n", "repo_name": "lostin21st/MyBlog", "sub_path": "migrations/versions/9e1e851bc44f_datefield_changed.py", "file_name": "9e1e851bc44f_datefield_changed.py", "file_ext": "py", "file_size_in_byte": 1532, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "alembic.op.add_column", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}, {"api_name": "alembic.op.create_index", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.drop_index", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.DATETIME", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 34, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 36, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}, {"api_name": "alembic.op.create_index", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "14196754656", "text": "#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport tensorflow\nimport keras_leaderboard \n\nimport datetime as dt\nimport os\nimport warnings\n\nnp.random.seed(21)\ntensorflow.set_random_seed(21)\n\nclass make_leaderboard(object):\n def __init__(self, \n X_train, y_train, X_val, y_val, \n data_name, \n augs, \n width, \n height, \n channels, \n optimizer, \n loss, \n metrics, \n classes, \n n_classes, #same as classes\n batch_size, \n model_network_name, \n epochs, \n patience, \n logfile,\n save_folder=None):\n self.X_train = X_train\n self.y_train = y_train\n self.X_val = X_val\n self.y_val = y_val\n self.data_name = data_name\n self.augs = augs\n self.width = width\n self.height = height\n self.channels = channels\n self.optimizer = optimizer\n self.loss = loss\n self.metrics = metrics\n self.classes = classes\n self.n_classes = n_classes #the same value\n self.batch_size = batch_size\n self.model_network_name = model_network_name\n self.epochs = epochs\n self.patience = patience\n self.logfile = logfile\n \n self.dir, self.plot_dir, self.csv_dir= self.make_dirs(save_folder)\n\n \n def register_the_data(self): \n data_object = keras_leaderboard.Register_Data(self.X_train, self.y_train, self.X_val, self.y_val, self.data_name, self.augs)\n xtr_shape, ytr_shape, xval_shape, yval_shape = data_object.register_shapes()\n data_augs, train_datagen = data_object.register_augs()\n \n return data_object, xtr_shape, ytr_shape, xval_shape, yval_shape, data_augs, train_datagen\n \n \n '''\n model_network_name: str \n default model_type is cnn; change to 'vgg' to call vgg\n build as many CNN's as you like \n '''\n \n def build_and_compile_CNN(self, model_type='cnn'):\n model_object = keras_leaderboard.LB_Model(self.width, self.height, self.channels, self.optimizer, self.loss, self.metrics, self.classes, self.batch_size, self.model_network_name)\n if model_type == 'cnn':\n network = model_object.basic_cnn()\n elif model_type == 'vgg':\n network = model_object.basic_vgg()\n return model_object, network\n \n \n '''\n logfile_name: str\n model_network: object\n datagen: object\n default fit_type is no early stopping; change to 'early' if you want early stopping\n ''' \n \n def fit_model(self, model_network, train_datagen, fit_type = 'nostop'):\n fitter_object = keras_leaderboard.LB_Fit(self.X_train, self.y_train, self.X_val, self.y_val, self.batch_size, self.epochs, self.patience, self.logfile, \n self.model_network_name, model_network, train_datagen)\n if fit_type == 'nostop':\n fitter_history, fitter_log = fitter_object.fit_nostop(self.dir)\n elif fit_type == 'early':\n fitter_history, fitter_log = fitter_object.fit_early(self.dir)\n return fitter_object, fitter_history, fitter_log\n \n \n def evaluate_model(self, history, metrics_name, model_network, save=True):\n metrics_object = keras_leaderboard.LB_Model_Metrics(history, metrics_name, model_network, self.X_val, self.y_val, self.n_classes, self.batch_size)\n \n # Save output values\n pred_values, pred_classes = metrics_object.softmax_predict()\n acc, prec, recall = metrics_object.scores(pred_classes)\n\n # Create plots\n acc, loss = metrics_object.losscurves()\n confusion_matrix = metrics_object.confusion_matrix_maker(pred_classes)\n rocs, roc_all = metrics_object.roc_auc(pred_values)\n \n # Save plots\n if save:\n \n np.save(os.path.join(self.dir, \"predicted_values.npy\"), pred_values) # Save prediction to numpy\n \n acc.savefig(os.path.join(self.plot_dir, f\"{metrics_object.name}_accuracy.png\"))\n print('Accuracy plot saved.')\n \n 
loss.savefig(os.path.join(self.plot_dir, f\"{metrics_object.name}_loss.png\"))\n print('Loss curve saved.')\n \n confusion_matrix.savefig(os.path.join(self.plot_dir, f\"{metrics_object.name}_confusion_matrix.png\"))\n print('Confusion matrix saved.')\n \n for k, plot in enumerate(rocs):\n plot.savefig(os.path.join(self.plot_dir, f\"{metrics_object.name}_{k}_roc_curve.png\")) \n roc_all.savefig(os.path.join(self.plot_dir, f\"{metrics_object.name}_roc_all_classes.png\"))\n \n print('ROC curves saved.')\n \n # Show plots\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n acc.show()\n loss.show()\n \n for plot in rocs: \n plot.show()\n \n roc_all.show()\n \n return metrics_object, pred_values, pred_classes, acc, prec, recall \n \n \n def model_configs(self, data_object, model_object, model_network, xtr_shape, ytr_shape, xval_shape, yval_shape, fitter_object, logfile, acc, prec, rec):\n model_configs_object = keras_leaderboard.leaderboard(data_object, model_object, model_network, self.augs, xtr_shape, ytr_shape, xval_shape, yval_shape, fitter_object, logfile, acc, prec, rec)\n \n model_configs_object.generate_configs() # Generate configuration and model outputs\n model_configs_object.save_configs(self.csv_dir) # Save configuration and model outputs to a csv\n \n return model_configs_object\n \n \n def make_dirs(self, save_folder):\n '''Make directory to save graphs and outputs'''\n id = dt.datetime.now()\n \n # Check if aggregate folder to save to\n if save_folder==None:\n dir_ = f\"{self.model_network_name}_{id.month}-{id.day}-{id.hour}-{id.minute}\"\n else:\n if os.path.isdir(save_folder)==False: # Make aggregate folder if it doesn't exist\n os.mkdir(save_folder)\n \n dir_ = os.path.join(save_folder, f\"{self.model_network_name}_{id.month}-{id.day}-{id.hour}-{id.minute}\")\n \n plot_dir = os.path.join(dir_, 'plots')\n csv_dir = os.path.join(dir_, 'csvs')\n \n os.mkdir(dir_) # Make directory\n os.mkdir(plot_dir) # Make directory for plots\n os.mkdir(csv_dir) # Make directory for output csvs\n \n with open(os.path.join(dir_,'model_parameters.txt'),'w') as f: # Save model params\n f.write(f\"Model Name: {self.model_network_name}\\n\") # Record model name\n f.write(f\"Batch Size: {self.batch_size}\\n\") # Record batch size parameter\n f.write(f\"Epochs: {self.epochs}\\n\") # Record epoch parameter\n f.write(f\"Optimizer: {self.optimizer}\\n\") # Record optimizer parameter\n \n return dir_, plot_dir, csv_dir", "repo_name": "nudro/keras_leaderboard", "sub_path": "run_leaderboard.py", "file_name": "run_leaderboard.py", "file_ext": "py", "file_size_in_byte": 7233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "97", "api": [{"api_name": "numpy.random.seed", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 13, "usage_type": "call"}, {"api_name": "keras_leaderboard.Register_Data", "line_number": 58, "usage_type": "call"}, {"api_name": "keras_leaderboard.LB_Model", "line_number": 72, "usage_type": "call"}, {"api_name": "keras_leaderboard.LB_Fit", "line_number": 88, "usage_type": "call"}, {"api_name": "keras_leaderboard.LB_Model_Metrics", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "warnings.catch_warnings", "line_number": 130, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 131, "usage_type": "call"}, {"api_name": "keras_leaderboard.leaderboard", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 168, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 169, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "8067498190", "text": "from app.json_processing.json_processor import write_to_json_in_dir, read_from_json_in_dir\n\nfrom bs4 import BeautifulSoup\nfrom calendar import monthrange\nfrom datetime import datetime\n\nimport requests\n\n\nclass POGODAKLIMAT_SOURCE_DATA:\n def __init__(self):\n self.russian_stations_ids = []\n self.data_station_temperature_day = {}\n\n def parse_pogodaklimat_day_data(self, start_year, stations_ids):\n min_year = 1881 # мимнимальный год\n # установка начального года\n start_year = start_year if start_year > min_year else min_year\n # определение текущего года и месяца\n today = datetime.today()\n current_year = today.year\n current_month = today.month\n current_day = today.day\n\n for i_station_id, station_id in enumerate(stations_ids):\n print(\"Process station №%s (%s/%s)\"%(str(station_id), str(i_station_id + 1), str(len(stations_ids))))\n \n data_by_station_id = read_from_json_in_dir(\n filename=\"pogodaklimat_day_data_from_station%s.json\" % str(\n station_id),\n dir=\"middle_data\"\n )\n if len(data_by_station_id) > 0:\n self.data_station_temperature_day[station_id] = data_by_station_id\n \n else:\n \n station_name = None\n station_region = None\n station_coordinate = None\n \n # старт парсинга\n for i_year in range(start_year, current_year+1, 1):\n max_month = 12 if i_year < current_year else current_month\n for i_month in range(1, max_month+1):\n current_url = 
\"http://www.pogodaiklimat.ru/monitor.php?id=%s&month=%s&year=%s\" %\\\n (str(station_id), str(i_month), str(i_year))\n r = requests.get(current_url)\n r.encoding = r.apparent_encoding\n\n b_soup = BeautifulSoup(r.text, 'html.parser')\n \n # анализ текста для определения наименования станции, региона станции, координаты станции\n if station_name is None:\n info_text_obj = b_soup.find_all(\"ul\", {\"class\": \"climate-list\"})[0]\n info_text = info_text_obj.text\n # название станции\n station_name = info_text.split(\n \"Информация о погоде получена с метеорологической станции \")[1].split(\" (\")[0]\n # название региона\n station_region = info_text.split(\n station_name + \" (\")[1].split(\")\")[0].strip()\n station_name = station_name.strip()\n # координаты\n # - широта\n latitude = info_text.split(\n \" широта \")[1].split(\", долгота \")[0].strip()\n latitude = float(latitude)\n # - долгота\n longitude = info_text.split(\n \" долгота \")[1].split(\", высота \")[0].strip()\n longitude = float(longitude)\n station_coordinate = [latitude, longitude]\n \n # анализ таблицы\n rows = b_soup.find_all('tr')\n\n headers_rus = []\n main_headers = []\n sub_headers = []\n \n i_day = 0\n fl_continue = True\n \n for i_row, row in enumerate(rows):\n if fl_continue:\n if i_row < 2:\n # определение заголовков в таблице\n if i_row == 0:\n # забрать основные заголовки\n main_headers = row.text.split(\"\\n\")\n upd_main_headers = [\n header for header in main_headers if header != '']\n main_headers = upd_main_headers\n else:\n # забрать подзаголовки\n sub_headers = row.text.split(\"\\n\")\n upd_sub_headers = [\n header for header in sub_headers if header != '']\n sub_headers = upd_sub_headers\n # сформирвать общие заголовки\n for i_main_header, main_header in enumerate(main_headers):\n if i_main_header != 1:\n headers_rus.append(main_header)\n else:\n for sub_header in sub_headers:\n new_header = \"%s (%s)\" % (main_header, sub_header)\n headers_rus.append(new_header)\n else:\n i_day += 1\n monthrange_range = monthrange(i_year, i_month)\n if i_day <= monthrange_range[1]:\n \n if i_day != current_day or \\\n (i_day == current_day and (i_year != current_year or i_month != current_month)):\n\n i_day_str = str(i_day) if len(\n str(i_day)) == 2 else \"0\"+str(i_day)\n i_month_str = str(i_month) if len(\n str(i_month)) == 2 else \"0\"+str(i_month)\n i_date = \"%s.%s.%s\" % (i_day_str, i_month_str, str(i_year))\n \n # считывание основных значений строк\n row_tags = [sub_tag for sub_tag in row.contents if not sub_tag in [\"\\n\", \" \", \"\"]]\n \n obj_info_by_day = {}\n for i, header in enumerate(headers_rus):\n if i > 0:\n txt_value = row_tags[i].text.replace('+', '')\n obj_info_by_day[header] = float(txt_value) \\\n if not txt_value is None and \\\n not txt_value.lower() in [\"none\", \"null\", \" \", \"\"] \\\n else None\n\n if not station_id in self.data_station_temperature_day.keys():\n # добавление данных\n self.data_station_temperature_day[station_id] = {\n \"station_name\": station_name,\n \"station_region\": station_region,\n \"station_coordinate\": station_coordinate,\n \"weather_dates\": {\n i_date: obj_info_by_day\n }\n }\n else:\n self.data_station_temperature_day[station_id][\"weather_dates\"][i_date] = obj_info_by_day\n \n else:\n if i_year == current_year and i_month == current_month:\n fl_continue = False\n else:\n fl_continue = False\n else:\n break\n\n # write_to_json_in_dir(\n # filename=\"pogodaklimat_day_data_from_station%s.json\" %str(station_id),\n # 
data=self.data_station_temperature_day[station_id],\n # dir=\"middle_data\"\n # )\n\n def parse_pogodaklimat_station_ids(self):\n # определение текущего года и месяца\n today = datetime.today()\n current_year = today.year\n current_month = today.month\n \n # получить номера станций по ссылке за текущие год и месяц\n url = \"http://www.pogodaiklimat.ru/monitors.php?id=rus&month=%s&year=%s\" % (\n str(current_month), str(current_year))\n r = requests.get(url)\n r.encoding = r.apparent_encoding\n\n b_soup = BeautifulSoup(r.text, 'html.parser')\n rows = b_soup.find_all('tr')\n \n # старт парсинга\n for i_row, row in enumerate(rows):\n # определение заголовков в таблице\n if i_row > 1:\n # считывание основных значений строк\n row_text = row.text[1:-1]\n cells_values = row_text.split(\"\\n\")\n if len(cells_values) == 1:\n if cells_values[0] != \"Россия\":\n break\n else:\n continue\n else:\n station_id = int(cells_values[0]) if not cells_values[0] is None and \\\n not cells_values[0].lower() in [\"none\", \"null\", \"\", \" \"] else None\n self.russian_stations_ids.append(station_id)\n", "repo_name": "crackpotwolf/Hackathon-TEK", "sub_path": "Weather_Service/app/data_getting/parsing/pogodaklimat_parser.py", "file_name": "pogodaklimat_parser.py", "file_ext": "py", "file_size_in_byte": 8625, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "datetime.datetime.today", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "app.json_processing.json_processor.read_from_json_in_dir", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 48, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 51, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 164, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 164, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 171, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "26505234264", "text": "from typing import Any, Dict, Type, TypeVar, Union\n\nfrom attrs import define as _attrs_define\n\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"MediaUrl\")\n\n\n@_attrs_define\nclass MediaUrl:\n \"\"\"\n Attributes:\n url (Union[Unset, None, str]):\n name (Union[Unset, None, str]):\n \"\"\"\n\n url: Union[Unset, None, str] = UNSET\n name: Union[Unset, None, str] = UNSET\n\n def to_dict(self) -> Dict[str, Any]:\n url = self.url\n name = self.name\n\n field_dict: Dict[str, Any] = {}\n field_dict.update({})\n if url is not UNSET:\n field_dict[\"Url\"] = url\n if name is not UNSET:\n field_dict[\"Name\"] = name\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n url = d.pop(\"Url\", UNSET)\n\n name = d.pop(\"Name\", UNSET)\n\n media_url = cls(\n url=url,\n name=name,\n )\n\n return media_url\n", "repo_name": "nqh00/gautrang", "sub_path": "utils/jellyfix/jellyfin_api_client/models/media_url.py", "file_name": "media_url.py", "file_ext": "py", "file_size_in_byte": 1000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "typing.TypeVar", "line_number": 7, "usage_type": "call"}, {"api_name": 
"typing.Union", "line_number": 18, "usage_type": "name"}, {"api_name": "types.Unset", "line_number": 18, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "types.Unset", "line_number": 19, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 25, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 27, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 35, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 37, "usage_type": "argument"}, {"api_name": "types.UNSET", "line_number": 39, "usage_type": "argument"}, {"api_name": "attrs.define", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "11505899094", "text": "import copy\nfrom datetime import datetime\nimport os\nimport numpy as np\nfrom scipy.ndimage.interpolation import shift\n\n\nclass IMU(object):\n def __init__(self):\n NSIZE = 3\n __CARLA_VERSION__ = os.getenv('CARLA_VERSION', '0.8.X')\n\n self._position = {'x': np.full(NSIZE, np.nan), 'y': np.full(NSIZE, np.nan), 'z': np.full(NSIZE, np.nan)}\n self._orientation = {'pitch': np.full(NSIZE, np.nan), 'roll':np.full(NSIZE, np.nan), 'yaw': np.full(NSIZE, np.nan)}\n self._time = np.full(NSIZE, np.nan)\n\n\n def update_state(self, measurements):\n # time update\n shift(self._times, -1, cval=np.NaN)\n self._times[-1] = datetime.now()\n\n # position update\n self._position['x'] = shift(self._position['x'], -1, cval=np.NaN)\n self._position['x'][-1] = measurements.player_measurements.transform.location.x\n\n self._position['y'] = shift(self._position['y'], -1, cval=np.NaN)\n self._position['y'][-1] = measurements.player_measurements.transform.location.y\n\n self._position['z'] = shift(self._position['z'], -1, cval=np.NaN)\n if __CARLA_VERSION__ == '0.8.X':\n self._position['z'][-1] = 1.6\n else:\n self._position['z'][-1] = measurements.player_measurements.transform.location.z\n\n # orientation update\n self._orientation['pitch'] = shift(self._orientation['pitch'], -1, cval=np.NaN)\n self._orientation['pitch'][-1] = measurements.player_measurements.transform.orientation.x\n\n self._orientation['roll'] = shift(self._orientation['roll'], -1, cval=np.NaN)\n self._orientation['roll'][-1] = measurements.player_measurements.transform.orientation.y\n\n self._orientation['yaw'] = shift(self._orientation['yaw'], -1, cval=np.NaN)\n self._orientation['yaw'][-1] = measurements.player_measurements.transform.orientation.z\n\n def _compute_inertial_measurements(self):\n imu_dict = {'acc_x':0, 'acc_y':0, 'acc_z':0, 'd_pitch':0, 'd_roll':0, 'd_yaw':0, 'dt':0}\n\n # if not warming-up period\n if not np.any(np.isnan(self._time)):\n delta = self._times[-1] - self._times[-2]\n delta = delta.seconds + delta.microseconds / 1E6\n imu_dict['dt'] = delta\n\n imu_dict['acc_x'] = (self._position['x'][-1] - 2.0 * self._position['x'][-2] + self._position['x'][-3]) / (delta * delta)\n imu_dict['acc_y'] = (self._position['y'][-1] - 2.0 * self._position['y'][-2] + 
self._position['y'][-3]) / (delta * delta)\n imu_dict['acc_z'] = (self._position['z'][-1] - 2.0 * self._position['z'][-2] + self._position['z'][-3]) / (delta * delta)\n\n imu_dict['d_pitch'] = (self._orientation['pitch'][-1] - self._orientation['pitch'][-2]) / delta\n imu_dict['d_roll'] = (self._orientation['roll'][-1] - self._orientation['roll'][-2]) / delta\n imu_dict['d_yaw'] = (self._orientation['yaw'][-1] - self._orientation['yaw'][-2]) / delta\n\n return imu_dict\n\n def __call__(self, *args, **kwargs):\n measurements = args[0]\n self.update_state(measurements)\n return self._compute_inertial_measurements()\n", "repo_name": "gy20073/CIL_modular", "sub_path": "imu.py", "file_name": "imu.py", "file_ext": "py", "file_size_in_byte": 3163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 15, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 20, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 27, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 30, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 40, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.any", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "36623588632", "text": "'''\n\nThis script retrieves the list of all available turnstile data files \nfrom the New York MTA web site, and saves the files to a local folder.\n\nThe page where the files are listed is http://web.mta.info/developers/turnstile.html\n\nThe file names from the page do not include the first two digits for \nthe year, the missing digits are added to the file name when the file \nis saved. 
Didn't they live through Y2K?\n\nIn their defense, the available files go back to 2010 (as I write this),\nso there is no risk of confusion.\n\nNote: I use the requests library instead of urllib2 because I prefer writing\n r = requests.get(myurl)\ninstead of \n r = urllib2.urlopen(myurl)\nIt saves me 3 characters. Otherwise functionality is the same for this simple case.\n\n'''\n\nimport requests\nfrom bs4 import BeautifulSoup as BS4\nimport re\nfrom timeit import default_timer as timer\nimport humanfriendly\n\n# initialize URL and folder, record start time\nurlroot = r'http://web.mta.info/developers/'\npath = r'e:\\python\\MTAturnstile\\turnstile_20'\nstarttime = timer()\n\n# read the page html and use BeautifulSoup to extract the list of data files\nr = requests.get(urlroot + 'turnstile.html')\nsoup = BS4(r.content, features = 'html.parser')\nfiles = soup.find('div', {'id': 'contentbox'}).find('div', {'class': 'container'}).find('div', {'class': 'span-84 last'}).findAll('a', attrs={'href': re.compile(\"^data/nyct/turnstile/\")})\n\n# iterate through the list of files, retrieve the data for each file, and save file to the local folder\nfor file in files:\n\tprint('Saving file turnstile_20' + str(file)[39:49])\n\tdatafile = requests.get(urlroot + str(file)[9:49])\n\twith open(path + str(file)[39:49], 'w') as outf:\n\t\tfor line in datafile.text:\n\t\t outf.writelines(line)\n\n# record completion time and display duration\nendtime = timer()\nprint('Completed in ' + humanfriendly.format_timespan(endtime-starttime))\n", "repo_name": "RollingHillsAnalytics/MTA-extraction", "sub_path": "retrieve-MTA-turnstile-data-files.py", "file_name": "retrieve-MTA-turnstile-data-files.py", "file_ext": "py", "file_size_in_byte": 1881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "97", "api": [{"api_name": "timeit.default_timer", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 36, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 48, "usage_type": "call"}, {"api_name": "humanfriendly.format_timespan", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "17223082659", "text": "import torch.nn as nn\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_dim, wasserstein=False):\n super(Discriminator, self).__init__()\n self.model = nn.Sequential(\n nn.Linear(input_dim, 2 * input_dim // 3),\n nn.LeakyReLU(0.2),\n nn.Linear(2 * input_dim // 3, input_dim // 3),\n nn.LeakyReLU(0.2),\n nn.Linear(input_dim // 3, 1),\n )\n\n if not wasserstein:\n self.model.add_module(\"activation\", nn.Sigmoid())\n\n def forward(self, x):\n return self.model(x)\n", "repo_name": "opendp/smartnoise-sdk", "sub_path": "synth/snsynth/pytorch/nn/_discriminator.py", "file_name": "_discriminator.py", "file_ext": "py", "file_size_in_byte": 566, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 220, "dataset": "github-code", "pt": "94", "api": [{"api_name": "torch.nn.Module", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 4, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 8, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "34619207614", "text": "import pygame as pg\nimport math\nimport random\nimport palette\nimport collision\nfrom hitbox import *\n\n\n# define constants\nRAINBOW_CYCLE_SPEED = 10\n# particles\nPARTICLE_SPEED = 4\nPARTICLE_SIZE = 4\nFADE_SPEED = 0.95\n# trail\nPARTICLE_LIFE = 20\n\n\nclass Particle:\n\t'''a single particle'''\n\tdef __init__(self, master, game, speed=PARTICLE_SPEED, direction=0, origin='plank',\n\t size=PARTICLE_SIZE, shape='square', fade_speed=FADE_SPEED, trail=None):\n\t\tself.master = master\n\t\tself.game = game\n\t\tself.hitbox = Hitbox(self, [size]*2)\n\n\t\t# position\n\t\tself.x = self.master.x\n\t\tself.y = self.master.y\n\n\t\t# properties\n\t\tself.speed = speed\n\t\tself.direction = direction\n\t\tself.direction += master.direction + 180 if hasattr(master, 'direction') else 0\n\t\tself.bounce_spread = 70\n\t\tself.fade_speed = fade_speed\n\n\t\t# appearance\n\t\tself.color = self.master.color if hasattr(master, 'color') else palette.DARK\n\t\tself.size = size\n\t\tself.shape = shape\n\t\tself.trail = trail\n\n\n\t\tif origin == 'plank':\n\t\t\toffset = random.randint(-self.master.width//2,\n\t\t\t self.master.width//2)\n\t\t\tself.x = self.master.x + offset\n\t\t\tself.y = self.master.y + self.master.height//2\n\n\tdef update(self):\n\t\tself.move()\n\t\tself.fade()\n\n\tdef render(self, screen):\n\t\t# if self.hitbox.show: self.hitbox.render(screen)\n\t\tif self.shape == 'square':\n\t\t\tpg.draw.rect(screen, self.color, [self.x-self.size//2, self.y-self.size//2, self.size, self.size])\n\t\t\treturn\n\t\tif self.shape == 'circle':\n\t\t\tpg.draw.circle(screen, self.color, [int(self.x), int(self.y)], self.size//2)\n\t\t\treturn\n\n\tdef fade(self, fade_speed=FADE_SPEED):\n\t\tself.color = self.blendColor(self.color, self.game.screen_color, self.fade_speed)\n\n\tdef blendColor(self, color0, color1, ratio=0.5):\n\t\treturn [color0[idx]*ratio + color1[idx]*(1-ratio) for idx in range(3)]\n\n\tdef move(self):\n\t\ttheta = math.radians(self.direction)\n\t\tself.x += math.sin(theta) * self.speed\n\t\tself.y += math.cos(theta) * self.speed\n\t\t\n\t\tself.check_bounds()\n\t\tself.check_collision()\n\n\tdef check_bounds(self):\n\t\tif self.x <= 0:\n\t\t\tx = 0\n\t\t\tself.direction = -90\n\t\t\tself.bounce()\n\t\tif self.x >= 800:\n\t\t\tx = 800\n\t\t\tself.direction = 90\n\t\t\tself.bounce()\n\t\tif self.y <= 0:\n\t\t\ty = 0\n\t\t\tself.direction = 180 - self.direction\n\t\t\tself.bounce()\n\t\tif self.y >= 640:\n\t\t\ty = 640\n\t\t\tself.direction = 180 - self.direction\n\t\t\tself.bounce()\n\n\tdef check_collision(self):\n\t\tif collision.detect(self.game.bar, self):\n\t\t\tself.direction = 180 - self.direction\n\t\t\tself.bounce()\n\n\tdef bounce(self):\n\t\tself.direction 
+= random.randint(-self.bounce_spread, self.bounce_spread)\n\n\n\n\nclass Trail:\n\t'''a trail of particles'''\n\tdef __init__(self, master, game, life=PARTICLE_LIFE, density=1, spread=0, fade_speed=FADE_SPEED,\n\t\t\t\t\tptcl_speed=PARTICLE_SPEED, direction=0, origin='plank',\n\t\t\t\t\tptcl_size=PARTICLE_SIZE, ptcl_shape='square', style=None):\n\t\tself.master = master\n\t\tself.game = game\n\n\t\t# properties\n\t\tself.life = life\n\t\tself.density = density\n\t\tself.spread = spread\n\t\tself.direction = direction\n\t\tself.origin = origin\n\t\tself.style = None\n\n\t\t# variables\n\t\tself.idx = 0\n\t\tself.qty = self.life * self.density\n\n\t\t# particle properties\n\t\tself.fade_speed = fade_speed\n\t\tself.ptcl_speed = ptcl_speed\n\t\tself.ptcl_size = ptcl_size\n\t\tself.ptcl_shape = ptcl_shape\n\n\t\t# trail\n\t\tself.trail = [self.place_ptcl()] * self.qty\n\n\tdef place_ptcl(self):\n\t\treturn Particle(self.master, self.game, speed=self.ptcl_speed, fade_speed=self.fade_speed,\n\t\t\t\tdirection=random.randint(-self.spread, self.spread) + self.direction,\n\t\t\t\torigin=self.origin, trail=self,\n\t\t\t\tsize=self.ptcl_size, shape=self.ptcl_shape)\n\n\tdef update(self):\n\t\tfor ptcl in self.trail: ptcl.update()\n\t\tself.qty = self.life * self.density\n\t\tself.idx = (self.idx+self.density) % self.qty\n\t\tfor itr in range(self.density):\n\t\t\tself.trail[(self.idx+itr) % self.qty] = self.place_ptcl()\n\n\tdef render(self, screen):\n\t\tfor ptcl in self.trail: ptcl.render(screen)\n\n\n\n\n\ndef make(master, game, life=PARTICLE_LIFE, density=1, spread=0, fade_speed=FADE_SPEED,\n\t\t\t\tptcl_speed=PARTICLE_SPEED, direction=0, origin='plank',\n\t\t\t\tptcl_size=PARTICLE_SIZE, ptcl_shape='square', style=None):\n\tmaster.trail = Trail(master, game, life, density, spread, fade_speed,\n\t\t\t\t\t\t\tptcl_speed, direction, origin,\n\t\t\t\t\t\t\tptcl_size, ptcl_shape, style)\n\ndef remove(master):\n\ttry:\n\t\tdel master.trail\n\texcept:\n\t\tpass\n\n\n", "repo_name": "mightbesimon/PONG", "sub_path": "particles.py", "file_name": "particles.py", "file_ext": "py", "file_size_in_byte": 4346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "94", "api": [{"api_name": "palette.DARK", "line_number": 39, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 71, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 72, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 73, "usage_type": "call"}, {"api_name": "collision.detect", "line_number": 97, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 102, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "72721488949", "text": "from sys import argv\r\n\r\n'''Task 1'''\r\n\r\n'''salary = (rate per hour * hour) + bonus'''\r\nscript_name, rate_per_hour, hours, bonus = argv\r\n\r\nprint('Часовая ставка:', rate_per_hour)\r\nprint('Отработано часов:', hours)\r\nprint('Премия:', bonus)\r\n\r\nsalary = float(rate_per_hour) * float(hours) + float(bonus)\r\nprint('Зарплата:', salary)\r\n\r\n\r\n'''Task 2'''\r\n\r\nimport 
random\r\n\r\n\r\nmy_list = [random.randint(-100, 100) for i in range(20)]\r\n\r\nprint(my_list)\r\n\r\nmy_list_2 = []\r\ncount = 0\r\n\r\nfor i in range(len(my_list)-1):\r\n if my_list[i] < my_list[i + 1]:\r\n my_list_2.append(my_list[i + 1])\r\n\r\nprint(my_list_2)\r\n\r\n'''Task 3'''\r\n\r\nmy_list_number = [i for i in range(20, 241) if i % 20 == 0 or i % 21 == 0]\r\nprint(my_list_number)\r\n\r\n\r\n'''Task 4'''\r\n\r\nmy_list_numbers = [random.choice(range(0, 50)) for i in range(20)]\r\nprint(my_list_numbers)\r\n\r\nmy_list_numbers_string = ' '.join(map(str, my_list_numbers))\r\n\r\nunique_numbers = []\r\n\r\nfor i in my_list_numbers:\r\n if my_list_numbers_string.count(str(i)) == 1:\r\n unique_numbers.append(i)\r\n\r\nprint(unique_numbers)\r\n\r\nfrom functools import reduce\r\n\r\n'''Task 5'''\r\n\r\nmy_list_task_5_numbers = [i for i in range(100, 1001) if i % 2 == 0]\r\n\r\nresult = reduce(lambda x, y: x * y, my_list_task_5_numbers)\r\n\r\nprint(result)\r\n\r\nimport itertools\r\n\r\n'''Task 6'''\r\n\r\nwhile True:\r\n my_list_lask_6 = [i for i in itertools.count(5)]\r\n if len(my_list_lask_6) >= 20:\r\n break\r\n\r\nmy_list_string_task_6 = ['I', 'L', 'Y']\r\n\r\ncount = 0\r\n\r\nfor i in itertools.cycle(my_list_string_task_6):\r\n print(i)\r\n count += 1\r\n if count == 10:\r\n break\r\n\r\n\r\n'''Task 7'''\r\n\r\n\r\ndef fact(n):\r\n result = 1\r\n for i in range(1, n+1):\r\n result *= i\r\n yield result\r\n\r\nfor i in fact(5):\r\n print(i)\r\n", "repo_name": "YShangareev/Geek_brains", "sub_path": "Main_program/First_quarter/Python_introduction/Homework_4.py", "file_name": "Homework_4.py", "file_ext": "py", "file_size_in_byte": 1793, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 21, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 42, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 61, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 70, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "18541798617", "text": "import matplotlib.pyplot as plt\nimport fromJSON as frm\nimport scipy.stats as scp\nimport numpy as np\nimport pymatgen as pmg\nimport json\nimport yaml\n\nnp.set_printoptions(threshold=np.inf)\n\nnames = ['rAvg', 'mAvg', 'colAvg', 'rowAvg', 'atNumAvg', 'sValFrac', 'pValFrac', 'dValFrac', \\\n 'fValFrac', 'sValAvg', 'pValAvg', 'dValAvg', 'fValAvg', 'elAffAvg']\n\ntitles = {'formation_energy_per_atom':'E$_{form}$ per atom','band_gap':'Band Gap',\\\n 'energy_per_atom':'E per atom', 'rAvg':'Average Radius', 'mAvg':'Average Mass',\\\n 'colAvg':'Average Column', 'rowAvg':'Average Row', 'atNumAvg':'Average Atomic Number',\\\n 'sValFrac':'S Valence $e^{-}$ Fraction', 'pValFrac':'P Valence $e^{-}$ Fraction',\\\n 'dValFrac':'D Valence $e^{-}$ Fraction', 'fValFrac':'F Valence $e^{-}$ Fraction',\\\n 'sValAvg':'Average S Valence $e^{-}s$', 'pValAvg':'Average P Valence $e^{-}s$',\\\n 'dValAvg':'Average D Valence $e^{-}s$', 'fValAvg':'Average F Valence $e^{-}s$',\\\n 'elAffAvg':'Average Electron Affinity', 'elNegAvg':'Average Electronegativity',\\\n 'G_VRH':'$G_{VRH}$', 'K_VRH':'$K_{VRH}$'}\n\norders = {'rAvg': 0, 'mAvg': 1, 'colAvg': 2, 'rowAvg': 3, 'atNumAvg': 4, 'sValFrac': 5,\\\n 'pValFrac': 6, 'dValFrac': 7, 'fValFrac': 8, 'sValAvg': 9, 'pValAvg': 10,\\\n 'dValAvg': 11, 'fValAvg': 
12, 'elAffAvg': 13}\n\ngroups = {'triclinic':[1,2], 'monoclinic':[3,15], 'orthorhombic':[16,74], 'tetragonal':[75,142],\\\n 'trigonal':[143,167], 'hexagonal':[168,194], 'cubic':[195,230], 'all':[0,230]}\n\n\n\nif __name__ == '__main__':\n \"\"\" main method\n * inputs data from filteredMaterialsData.json; reads through the\n data dictionary for each compound and creates arrays from the data\n * for both G_VRH and K_VRH, plots each array and the pearson value for\n how well each input correlates with the G_VRH and K_VRH data\n * writes the pearson values to file, identifying the modulus and data used\n \"\"\"\n\n des_spgr = 'all' #Enter your desired space group!!!\n groups = {'triclinic':[1,2], 'monoclinic':[3,15], 'orthorhombic':[16,74], 'tetragonal':[75,142], \\\n 'trigonal':[143,167], 'hexagonal':[168,194], 'cubic':[195,230], 'all':[0,230]}\n\n with open('TextFiles/materialsData.json') as jFile:\n data = json.load(jFile)\n \n wFile = open('TextFiles/pearsonVals.txt', 'w')\n stress = ['$G_{VRH}$', '$K_{VRH}$']\n\n xs, Gvrh, Kvrh = frm.refineSpaceGroup(groups[des_spgr], orders)\n xs = np.transpose(xs)\n stressType = [Gvrh, Kvrh]\n for ind in range(len(stress)):\n plt.figure(ind+1)\n currStress = stress[ind]\n \n wFile.write('\\n'+currStress+':\\n')\n\n for k in range(1, len(names)):\n curr = stressType[ind]\n arr = xs[:][orders[names[k]]]\n \n a = plt.subplot(4,4,k+1)\n pearVal = scp.pearsonr(arr, curr)\n a.plot(arr, curr, marker='o', alpha=0.2, linestyle='None')\n string = titles[names[k]]+' '+str(pearVal)+'\\n'\n a.set_title(currStress+' vs. '+titles[names[k]]+' '+str(round(pearVal[0], 5)))\n a.set_xlabel(titles[names[k]])\n a.set_ylabel(currStress)\n plt.xscale('linear')\n plt.yscale('linear')\n plt.subplots_adjust(hspace=0.5)\n wFile.write(string)\n\n plt.show()\n wFile.close()\n", "repo_name": "jonicschmidt/materials_machine_learning", "sub_path": "Acquiring Data/plotAndCoeff.py", "file_name": "plotAndCoeff.py", "file_ext": "py", "file_size_in_byte": 3546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "numpy.set_printoptions", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 9, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 47, "usage_type": "call"}, {"api_name": "fromJSON.refineSpaceGroup", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "scipy.stats.pearsonr", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 77, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "23921927876", "text": "import pygame\nimport random\nimport math\nimport asyncio\nfrom gameObject import Character, Barricade, Terminate\npygame.init()\nscreen = pygame.display.set_mode((400, 300))\ndone = False\nFPSCLOCK = pygame.time.Clock()\n\ndef get_distance(location1, location2):\n x = (location2[0] - location1[0]) * (location2[0] - location1[0])\n y = (location2[1] - location1[1]) * (location2[1] - location1[1])\n return math.sqrt(x + y)\n\nclass GameState:\n def __init__(self):\n # initialize objects\n self.character = Character(screen)\n self.barricade = Barricade(screen, 100,random.randrange(0,200))\n self.barricade2 = Barricade(screen, 200,random.randrange(0,200))\n self.terminate = Terminate(screen, 400, 300)\n self.timer = 0\n\n\n def frame_step(self, input_actions):\n # internally process pygame event handlers\n pygame.event.pump()\n\n reward = 0\n terminal = False\n\n if sum(input_actions) != 1:\n raise ValueError('Multiple input actions!')\n\n # input_actions[0] == 1: stay\n # input_actions[1] == 1: go left\n # input_actions[2] == 1: go right\n # input_actions[3] == 1: go up\n # input_actions[4] == 1: go down\n\n else:\n self.character.update(input_actions)\n\n self.barricade.update()\n self.barricade2.update()\n # check if character collides with barricade\n # if (get_distance(self.character.location, self.terminate.location) / 1000.0) < 0.45:\n # reward = 0.1\n\n # if (get_distance(self.character.location, self.terminate.location) / 1000.0) < 0.35:\n # reward = 0.2\n \n # if (get_distance(self.character.location, self.terminate.location) / 1000.0) < 0.25:\n # reward = 0.4\n\n # if (get_distance(self.character.location, self.terminate.location) / 1000.0) < 0.15:\n # reward = 0.6\n reward = -(get_distance(self.character.location, self.terminate.location)/ 100000)\n if self.barricade.iscrashed(self.character.location_x, self.character.location_y) or self.barricade2.iscrashed(self.character.location_x, self.character.location_y):\n reward = -1\n print(\"crashed\")\n terminal = True\n \n if self.terminate.isCrashed(self.character.location_x, self.character.location_y):\n reward = 1000\n print(\"crashed terminal\")\n terminal = True\n \n screen.fill((0, 0, 0))\n self.character.draw()\n self.barricade.draw()\n self.barricade2.draw()\n self.terminate.draw()\n print(reward)\n \n image_data = pygame.surfarray.array3d(pygame.display.get_surface())\n if self.timer == 1000:\n self.timer = 0\n terminal = True\n if not self.timer % 100:\n print(\"=============================================================\")\n print(\"Game Timer :\", end = \"\")\n print(self.timer)\n if terminal:\n self.__init__()\n pygame.display.update()\n FPSCLOCK.tick(30)\n self.timer += 1\n \n return image_data, reward, terminal\n", "repo_name": "Soonmok/2018-AI-Study-Seminar", "sub_path": "our_projects/keeping_pablo/game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 3175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "97", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 9, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 14, "usage_type": 
"call"}, {"api_name": "gameObject.Character", "line_number": 19, "usage_type": "call"}, {"api_name": "gameObject.Barricade", "line_number": 20, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 20, "usage_type": "call"}, {"api_name": "gameObject.Barricade", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 21, "usage_type": "call"}, {"api_name": "gameObject.Terminate", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.event.pump", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.surfarray.array3d", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.surfarray", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 87, "usage_type": "attribute"}]} +{"seq_id": "71346624308", "text": "import smplx\nfrom model import VPoserModel\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\n\nfrom mesh_intersection.bvh_search_tree import BVH\nimport mesh_intersection.loss as collisions_loss\n\n\nclass IntersectLoss(nn.Module):\n def __init__(\n self,\n model: smplx.SMPL,\n device=torch.device('cpu'),\n dtype=torch.float32,\n batch_size=1,\n weight=1,\n sigma=0.5,\n max_collisions=8,\n point2plane=True\n ):\n \"\"\"Intersections loss layer.\n\n Args:\n device ([type], optional): [description]. Defaults to torch.device('cpu').\n dtype ([type], optional): [description]. Defaults to torch.float32.\n weight (int, optional): Weight factor of the loss. Defaults to 1.\n sigma (float, optional): The height of the cone used to calculate the distance field loss. Defaults to 0.5.\n max_collisions (int, optional): The maximum number of bounding box collisions. 
Defaults to 8.\n \"\"\"\n\n super(IntersectLoss, self).__init__()\n\n self.has_parameters = False\n\n with torch.no_grad():\n output = model(get_skin=True)\n verts = output.vertices\n\n face_tensor = torch.tensor(\n model.faces.astype(np.int64),\n dtype=torch.long,\n device=device) \\\n .unsqueeze_(0) \\\n .repeat(\n [batch_size,\n 1, 1])\n\n bs, nv = verts.shape[:2]\n bs, nf = face_tensor.shape[:2]\n\n faces_idx = face_tensor + \\\n (torch.arange(bs, dtype=torch.long).to(device) * nv)[:, None, None]\n\n self.register_buffer(\"faces_idx\", faces_idx)\n\n # Create the search tree\n self.search_tree = BVH(max_collisions=max_collisions)\n\n self.pen_distance = \\\n collisions_loss.DistanceFieldPenetrationLoss(sigma=sigma,\n point2plane=point2plane,\n vectorized=True)\n\n # create buffer for weights\n self.register_buffer(\n \"weight\",\n torch.tensor(weight, dtype=dtype).to(device=device)\n )\n\n def forward(self, pose, joints, points, keypoints, raw_output):\n verts = raw_output.vertices\n polygons = verts.view([-1, 3])[self.faces_idx]\n\n # find collision idx\n with torch.no_grad():\n collision_idxs = self.search_tree(polygons)\n\n # compute penetration loss\n return self.pen_distance(polygons, collision_idxs) * self.weight\n", "repo_name": "gosticks/body-pose-animation", "sub_path": "modules/intersect.py", "file_name": "intersect.py", "file_ext": "py", "file_size_in_byte": 2587, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "smplx.SMPL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 42, "usage_type": "call"}, {"api_name": "model.faces.astype", "line_number": 43, "usage_type": "call"}, {"api_name": "model.faces", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mesh_intersection.bvh_search_tree.BVH", "line_number": 60, "usage_type": "call"}, {"api_name": "mesh_intersection.loss.DistanceFieldPenetrationLoss", "line_number": 63, "usage_type": "call"}, {"api_name": "mesh_intersection.loss", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "39155828138", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Exploring the summary data of flights from Brazil\n\n# One of the industries that was very impacted by the COVID-19 pandemic was the air transport industry. The data shared by ANAC - the Brazilian Civil Aviation Authority - presents how it was affected. 
Let's explore this data in this Jupyter notebook and see if we can answer some questions and get some insights.\n\n# The data used in this notebook may be found at:\n# \n# - https://www.gov.br/anac/pt-br/assuntos/dados-e-estatisticas/dados-estatisticos/arquivos/resumo_anual_2019.csv\n# - https://www.gov.br/anac/pt-br/assuntos/dados-e-estatisticas/dados-estatisticos/arquivos/resumo_anual_2020.csv\n# - https://www.gov.br/anac/pt-br/assuntos/dados-e-estatisticas/dados-estatisticos/arquivos/resumo_anual_2021.csv\n\n# First of all, let's import the libraries we are going to use:\n\n# In[1]:\n\n\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport unidecode\n\n\n# I am using the Seaborn library (which builds on top of matplotlib) for the plots. I am also using the unidecode library to convert the column names to a more friendly format.\n\n# Now the files are loaded and merged into a single dataframe.\n\n# In[2]:\n\n\nfolder = r'C:\\Users\\thiag\\data\\ANAC-transport'\n\ndffiles = ['resumo_anual_2019.csv',\n           'resumo_anual_2020.csv',\n           'resumo_anual_2021.csv']\n\ndf = pd.concat([pd.read_csv(os.path.join(folder, x),\n                            sep=';', encoding=('ISO-8859-1'))\n                for x in dffiles])\n\n\n# Let's look at the data.\n\n# In[3]:\n\n\nprint(df.head())\n\n\n# The following can be observed about the column names:\n# - They are written in Portuguese and contain accentuation;\n# - They are all in upper case letters;\n# - They contain spaces and parentheses.\n# \n# To facilitate readability we will modify the column names by:\n# - Replacing the spaces with underscores \"_\";\n# - Removing the parentheses;\n# - Making all letters lowercase; and\n# - Removing the accents.\n# This convention is called snake_case and, even though it is not a standard, it is frequently used. For more information, refer to: https://en.wikipedia.org/wiki/Snake_case\n\n# In[4]:\n\n\nprint(\"Column names before changes:\\n\")\nprint(df.columns)\n\ndf.columns = [unidecode.unidecode(z.lower())\n              .replace(' ','_')\n              .replace('(','')\n              .replace(')','') \n              for z in df.columns]\n\ndf.to_csv('3years.csv', sep=';', index=False)\n\nprint(\"Column names after changes:\\n\")\n\nprint(df.columns)\n\n\n# This looks better.\n# Let's add some new columns to this dataframe, to support our analysis:\n# - Since we are looking for a chronological observation, it is interesting to concatenate the calendar years and months into a single variable called 'data' (Portuguese for date; I am keeping Portuguese names for consistency).\n# - We can also infer the routes from the origin and destination airport variables (respectively called aeroporto_de_origem_sigla and aeroporto_de_destino_sigla). A variable named 'rota' (Portuguese for route) will be created to store the 'origin->destination' string. 
Another variable with the names of the airports (instead of the codes) will be created (and called 'rota_nome') for readability (not everyone knows all airport codes).\n# - Finally, for the 'rpk','ask','rtk' and 'atk' variables, the NaN values could be replaced with zeroes (the corresponding fillna calls are left commented out below).\n# \n\n# In[5]:\n\n\ndf['data'] = [str(x['ano']) + '-' + \"{:02}\".format(x['mes'])\n              for index, x in df.iterrows()]\n\ndf['rota'] = [str(x['aeroporto_de_origem_sigla']) + '->' +\n              str(x['aeroporto_de_destino_sigla'])\n              for index, x in df.iterrows()]\n\ndf['rota_nome'] = [str(x['aeroporto_de_origem_nome']) + '->' +\n                   str(x['aeroporto_de_destino_nome'])\n                   for index, x in df.iterrows()]\n\ndf['load_factor'] = df['rpk']/df['ask']\n\n#df['rpk'] = df['rpk'].fillna(0)\n#df['ask'] = df['ask'].fillna(0)\n#df['rtk'] = df['rtk'].fillna(0)\n#df['atk'] = df['atk'].fillna(0)\n\n\n# (Note: the list comprehensions above iterate row by row; the same columns could also be built with vectorized pandas string operations, which would be considerably faster on a dataframe of this size.)\n# \n# Since there is no data dictionary, it is now a good time to talk about these variables:\n# - RPK, meaning \"Revenue Passenger Kilometers\", is an air transport industry metric that aggregates the number of paying passengers and the distance traveled by them. It is calculated by multiplying the number of paying passengers by the distance traveled in kilometers.\n# - ASK, meaning \"Available Seat Kilometers\", is similar to the RPK, but instead of the paying passengers, the passenger capacity (number of seats available in the aircraft) is multiplied by the traveled distance.\n# - RTK (for \"Revenue tonne kilometres\") measures the revenue load (passengers, baggage, cargo and mail) in tonnes multiplied by the distance flown in kilometers.\n# - ATK (for \"Available tonne kilometres\") measures the aircraft load capacity in tonnes multiplied by the distance flown in kilometers.\n# \n# The dataframe presents not only the value of these parameters but also the variables that compose their formula. 
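# To make these metrics concrete, here is a small worked example for a single hypothetical flight (the numbers are invented for illustration and do not come from the ANAC data):\n\n# In[ ]:\n\n\npassengers = 150   # paying passengers on board\nseats = 180        # seat capacity of the aircraft\ndistance_km = 400  # distance flown\n\nrpk = passengers * distance_km  # 60000 revenue passenger kilometers\nask = seats * distance_km       # 72000 available seat kilometers\nprint('RPK:', rpk)\nprint('ASK:', ask)\nprint('Load factor:', rpk / ask)  # ~0.83, i.e. about 83% of the seats were sold\n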
Therefore, let's make a consistency check, verifying that the reported values can be reproduced from the other variables.\n\n# The formulas of the variables are:\n\n# $ RPK = \\frac{\\sum{PayingPassengers} \\times distance}{\\sum{flights}} $\n# \n# $ ASK = \\frac{\\sum{Seats} \\times distance}{\\sum{flights}} $\n# \n# $ RTK = \\frac{(AvgWeight \\times \\sum{PayingPassengers} + BaggageWeight + CargoWeight + MailWeight) \\times distance}{1000 \\times \\sum{flights}} $\n# \n# $ ATK = \\frac{Payload \\times distance}{1000 \\times \\sum{flights}} $\n# \n# Here AvgWeight is the assumed average passenger weight in kg, and the division by 1000 converts kilograms into tonnes.\n\n# In[6]:\n\n\n#df['ask_calc'] = df['assentos']*df['distancia_voada_km']\n#df['rpk_calc'] = df['passageiros_pagos']*df['distancia_voada_km']\n\n\n# In[7]:\n\n\ndummy = []\nfor index, x in df.iterrows():\n    if x['decolagens'] == 0:\n        dummy.append(abs(x['rpk']) < 1000)\n    else:\n        dummy.append(abs(x['rpk'] - x['passageiros_pagos']*x['distancia_voada_km']/x['decolagens']) < 1000)\nprint('The share of rpk values that match the rpk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))\ndf['rpk_calc']= df['passageiros_pagos']*df['distancia_voada_km']/df['decolagens']\ndel dummy\n\n\ndummy = []\nfor index, x in df.iterrows():\n    if x['decolagens'] == 0:\n        dummy.append(abs(x['ask']) < 1000)\n    else:\n        dummy.append(abs(x['ask'] - x['assentos']*x['distancia_voada_km']/x['decolagens']) < 1000)\nprint('The share of ask values that match the ask calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))\ndf['ask_calc']=df['assentos']*df['distancia_voada_km']/df['decolagens']\n\ndel dummy\n\ndummy = []\nfor index, x in df.iterrows():\n    if x['decolagens'] == 0:\n        dummy.append(abs(x['rtk']) < 1000)\n    else:\n        dummy.append(abs(x['rtk'] - (75*x['passageiros_pagos']+x['carga_paga_kg']+x['correio_kg']+x['bagagem_kg'] )*\n                         x['distancia_voada_km']/(1000*x['decolagens'])) < 1000)\nprint('The share of rtk values that match the rtk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))\ndf['rtk_calc']=(75*df['passageiros_pagos']+df['carga_paga_kg']+df['correio_kg']+df['bagagem_kg']\n                )*df['distancia_voada_km']/(1000*df['decolagens'])\n\ndel dummy\n\ndummy = []\nfor index, x in df.iterrows():\n    if x['decolagens'] == 0:\n        dummy.append(abs(x['atk']) < 1000)\n    else:\n        dummy.append(abs(x['atk'] - x['payload']*x['distancia_voada_km']/(1000*x['decolagens'])) < 1000)\nprint('The share of atk values that match the atk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))\ndf['atk_calc']=df['payload']*df['distancia_voada_km']/(1000*df['decolagens'])\n\ndel dummy\n\n\n# We can see that the consistency is variable, and is specifically lower for the RTK values.\n# \n# One clear disadvantage of the calculated RTK is that the same average weight (75 kg) was used for all passengers of all airlines. This assumption implies that Brazilian and foreign companies use (or have to use) the same value for passenger weight in their flight planning, which may not be true.\n# \n# Let's observe if being a Brazilian or a foreign airline has an effect on the relationship between the reported RTK and the calculated RTK:\n# \n# \n\n# In[8]:\n\n\nsns.scatterplot(x=df['rtk'],y=df['rtk_calc'],hue=df['empresa_nacionalidade'])\n\n\n# We can see clearly that the line y=x has many Brazilian airlines on it, but not the foreign ones. 
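# Since many points fall away from the y=x line, a natural question is which average passenger weight would best reconcile the reported and the recalculated RTK. The cell below is a minimal sketch of such a search (the function name best_avg_weight and the 50-120 kg candidate range are illustrative choices, not part of the original analysis):\n\n# In[ ]:\n\n\nimport numpy as np\n\ndef best_avg_weight(sub):\n    # keep only rows with at least one takeoff, to avoid division by zero\n    sub = sub[sub['decolagens'] > 0]\n    candidates = range(50, 121)  # candidate average weights in kg (assumed range)\n    errors = []\n    for w in candidates:\n        rtk_calc = (w*sub['passageiros_pagos']+sub['carga_paga_kg']+sub['correio_kg']+sub['bagagem_kg']\n                    )*sub['distancia_voada_km']/(1000*sub['decolagens'])\n        errors.append((sub['rtk'] - rtk_calc).abs().mean())  # mean absolute error\n    return candidates[int(np.argmin(errors))]\n\nfor nat, sub in df.groupby('empresa_nacionalidade'):\n    print(nat, best_avg_weight(sub))\n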
Looking at the scatterplot again, there is also a second line below the y=x line, suggesting a different tendency for some foreign airlines.\n# \n# By optimizing the error between the reported RTK and the calculated RTK separately for Brazilian airlines and foreign airlines, we arrive at the following values:\n# - Brazilian airlines have 75 kg as the best average value for passenger weight;\n# - Foreign airlines have 90 kg as the best average value for passenger weight.\n# \n# These numbers come from an optimization exercise described in the article below:\n# \n# ## PUT LINK HERE\n\n# With this knowledge, let's calculate the RTK again:\n\n# In[9]:\n\n\ndummy = []\nrtk_calc = []\nfor index, x in df.iterrows():\n    if x['empresa_nacionalidade'] == 'BRASILEIRA':\n        avgw = 75\n    elif x['empresa_nacionalidade'] == 'ESTRANGEIRA':\n        avgw = 90\n    else:\n        avgw = 75  # assumed fallback; the dataset is expected to contain only the two values above\n    \n    if x['decolagens'] == 0:\n        rtk = float('NaN')\n        dummy.append(abs(x['rtk']) < 1000)\n    else:\n        rtk = (avgw*x['passageiros_pagos']+x['carga_paga_kg']+x['correio_kg']+x['bagagem_kg']\n               )*x['distancia_voada_km']/(1000*x['decolagens'])\n        dummy.append(abs(x['rtk'] - rtk) < 1000)\n    rtk_calc.append(rtk)\n\nprint('The share of rtk values that match the rtk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))\ndf['rtk_calc'] = rtk_calc\n\ndel dummy, rtk_calc, rtk\n\n\n# We see now that the match of RTK values improved from 56.28% to 58.93%. Let's also replot the previous graphic with the corrected calculated RTK.\n\n# In[10]:\n\n\nsns.scatterplot(x=df['rtk'],y=df['rtk_calc'],hue=df['empresa_nacionalidade'])\n\n\n# We can see that the second tendency line is gone, since we have taken its behaviour into consideration in our model.\n\n# After the consistency check, let's take a look at how the number of flights has evolved over time in our data.\n\n# In[11]:\n\n\ndf1 = pd.DataFrame(df.groupby(by=['data','ano']).agg('sum')['decolagens'])\n\ndf1.reset_index(inplace=True)\nax = sns.catplot(x='data', y='decolagens', data=df1, kind='bar', hue='ano', height=6, aspect=10/6,\n                 sharey=True)\n\nax.set_xticklabels(rotation=90, ha=\"right\") \nax.fig.suptitle('# Flights per month')\n\n\n# We can see the number of flights diminishes drastically after March 2020. 
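# The size of this drop can be quantified with a month-over-month percentage change computed on the same aggregation (a quick sketch; the 2020-01 to 2020-06 window is just the region of interest):\n\n# In[ ]:\n\n\nmonthly = df.groupby('data')['decolagens'].sum().sort_index()\nmom_change = monthly.pct_change() * 100  # month-over-month change in %\nprint(mom_change.loc['2020-01':'2020-06'])\n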
Interestingly enough, this should not be credited only to the pandemic effect, since January is typically the month with the most flights in a year.\n\n# Let's check which are the busiest airports in Brazil (by number of takeoffs), according to ANAC's data:\n\n# In[13]:\n\n\ndf2 = pd.DataFrame(df.groupby(by=['aeroporto_de_origem_nome']).agg('sum')['decolagens'])\ndf2 = df2.sort_values(by=['decolagens'], ascending=False)\nprint(df2[:10])\ndf2.reset_index(inplace=True)\n\nax = sns.catplot(x='aeroporto_de_origem_nome', y='decolagens',\n                 data=df2[:20], kind='bar', color='b', sharey=True)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\nax.fig.suptitle('# Flights per airport')\n\n\n# In[14]:\n\n\ndf3 = df[df['aeroporto_de_origem_nome']=='GUARULHOS']\ndf3 = pd.DataFrame(df3.groupby(by=['data','ano']).agg('sum')['decolagens'])\ndf3.reset_index(inplace=True)\n\nax = sns.catplot(x='data', y='decolagens', data=df3, kind='bar', hue='ano', height=6, aspect=10/6, sharey=True)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\nax.fig.suptitle('# Flights in Guarulhos airport per month')\n\n\n# Let's also verify whether the travel restrictions affected Brazilian and foreign airlines differently.\n\n# In[15]:\n\n\ndf4 = pd.DataFrame(df.groupby(by=['data', 'empresa_nacionalidade']).agg('sum')['decolagens'])\n\n#df4[df4.index.isin(['ESTRANGEIRA'],level=1)]['decolagens'].values  # inspection only; this expression has no effect\n\ndf4.reset_index(inplace=True)\n\nax = sns.catplot(x='data', y='decolagens',\n                 data=df4, kind='bar', hue='empresa_nacionalidade', height=6, aspect=10/6, sharey=True)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\nax.fig.suptitle('# Flights per Month - Brazilian and Foreign Airlines')\n\n\n# From the graphic above we can see that Brazilian airlines show more volatility in the number of flights than foreign airlines.\n# \n# Let's now check the routes with the highest number of flights in the data set.\n\n# In[16]:\n\n\ndf5 = pd.DataFrame(df.groupby(by=['rota_nome']).agg('sum')['decolagens'])\ndf5 = df5.sort_values(by=['decolagens'], ascending=False)\ndf5.reset_index(inplace=True)\n\nax = sns.catplot(x='rota_nome', y='decolagens',\n                 data=df5[:20], kind='bar', color='b', height=6, aspect=10/6, sharey=True)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\nax.fig.suptitle('# Flights per route')\n\n\n# We can see that even though Guarulhos (São Paulo international airport) is the busiest airport per number of flights in Brazil, it is not present in the Top 2 routes, which are between Rio de Janeiro and São Paulo local airports. Guarulhos, however, is present in 10 of the Top 20 routes, which shows its relevance as a national and international hub.
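# That count can be checked directly from the route ranking built above (a quick sketch; 'GUARULHOS' is the airport name as it appears in this dataset):\n\n# In[ ]:\n\n\ntop20 = df5['rota_nome'][:20]\nprint(top20.str.contains('GUARULHOS').sum(), 'of the top 20 routes involve Guarulhos')\n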
\n\n# Let's now observe the temporal effect on the top 5 routes (by number of flights) in this dataframe:\n\n# In[17]:\n\n\ntoproutes = df5['rota_nome'][:5]\n\n\n# In[18]:\n\n\ndf6 = pd.DataFrame(df[df['rota_nome'].isin(list(toproutes))]\n                   .groupby(by=['data', 'rota_nome'])\n                   .agg('sum')['decolagens'])\ndf6.reset_index(inplace=True)\n\nax = sns.catplot(x='data', y='decolagens', #height=6, aspect=10/6, #hue='rota (nome)', \n                 col='rota_nome', data=df6, kind='bar', col_wrap=2,\n                 sharey=True)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\n#ax.fig.suptitle('# TAKEOFFs per month - route')\n\n\n# Let's now check the top RPK routes.\n\n# In[28]:\n\n\ndf7 = pd.DataFrame(df.groupby(by=['rota_nome', 'empresa_nacionalidade'])\n                   .agg('sum')['rpk'])\n\ndf7 = df7.sort_values(by=['rpk'], ascending=False)\n\ndf7.reset_index(inplace=True)\n\nax = sns.catplot(x='rota_nome', y='rpk', #hue='rota_nome', col='empresa_nacionalidade',\n                 data=df7[0:20], kind='bar', color='b', #col_wrap=2,\n                 sharey=True)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\nax.fig.suptitle('RPK per route')\n\ntoprpkroutes = df7['rota_nome'].loc[:19]\n\n\n# And now the evolution of the top RPK routes across the dates of this data set.\n\n# In[29]:\n\n\ndf8 = pd.DataFrame(df[df['rota_nome'].isin(list(toprpkroutes))]\n                   .groupby(by=['rota_nome', \n                                'data']).agg(\n                                    rpk=pd.NamedAgg('rpk','mean'),\n                                    decolagens=pd.NamedAgg('decolagens','sum')\n                                ))\ndf8.reset_index(inplace=True) \n\nax = sns.catplot(x='data', y='rpk', #hue='decolagens',\n                 col='rota_nome',\n                 data=df8, kind='bar', col_wrap=3,\n                 sharey=True, height=6, aspect=1)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\nax.fig.suptitle('RPK per month - route')\n\n\n# Finally, let's look at the load factor per month for the same top RPK routes.\n\n# In[25]:\n\n\n# df9 was not defined in the original cell; build it analogously to df8,\n# with the mean load factor per route and month:\ndf9 = pd.DataFrame(df[df['rota_nome'].isin(list(toprpkroutes))]\n                   .groupby(by=['rota_nome',\n                                'data']).agg(\n                                    load_factor=pd.NamedAgg('load_factor','mean')\n                                ))\ndf9.reset_index(inplace=True)\n\nax = sns.catplot(x='data', y='load_factor', #hue='decolagens',\n                 col='rota_nome',\n                 data=df9, kind='bar', col_wrap=2,\n                 sharey=True, height=6, aspect=1)\n\nax.set_xticklabels(rotation=90, ha=\"right\")\n\nax.set_xlabels(\"Year-Month\",fontsize=12)\nax.set_ylabels(\"Load Factor\",fontsize=12)\n#ax.fig.suptitle('test',fontsize=10, p)\n\n", "repo_name": "santanasan/ANAC-transport", "sub_path": "anac_eda.py", "file_name": "anac_eda.py", "file_ext": "py", "file_size_in_byte": 15428, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "pandas.concat", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "unidecode.unidecode", "line_number": 70, "usage_type": "call"}, {"api_name": "seaborn.scatterplot", "line_number": 198, "usage_type": "call"}, {"api_name": "seaborn.scatterplot", "line_number": 244, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 254, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 257, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 271, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 276, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 287, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 290, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 301, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 307, "usage_type":
"call"}, {"api_name": "pandas.DataFrame", "line_number": 321, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 345, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 350, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 363, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 370, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 385, "usage_type": "call"}, {"api_name": "pandas.NamedAgg", "line_number": 388, "usage_type": "call"}, {"api_name": "pandas.NamedAgg", "line_number": 389, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 393, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 405, "usage_type": "call"}]} +{"seq_id": "1676830223", "text": "from copy import deepcopy\nimport networkx as nx\nimport time\nimport logging\nfrom gurobi_utils.gurobi_subgraphs import basic_subgraph_check_result as subgraph_check\n\navailable_IDs = list(range(1, 10_000_000))\n\ndef get_max_degree_node(graph, nodes):\n degrees = [(n, graph.degree(n)) for n in nodes]\n max_degree = 0\n max_node = nodes[0] # get a default return\n for n, degree in degrees:\n if degree > max_degree:\n max_degree = degree\n max_node = n\n return max_node\n\nclass Node():\n \"\"\"Node for doing tree search on link problem.\n local_graph is a LocallyFGraph object\n children is a list of Node objects\n status can be one of the following:\n - 'TG' Terminal, good\n - 'IP' In progress\n - 'CI' Canceled, isomorphism\n - 'EC' Excessive children\n - 'TB' Terminal, bad\n - 'TL' Too large\"\"\"\n\n def __init__(self, local_graph, children = [], status = None, ID = 0):\n self.local_graph = local_graph\n self.children = children\n self.status = status\n self.ID = ID\n\n \n def __str__(self):\n node_desc = f\"\"\"\n ID: {self.ID}\n A node that has {len(self.children)} children and status {self.status}.\n The graph's order: {(self.local_graph.graph.order())}; \n {len(self.local_graph.finished_nodes_)} finished vertices: {self.local_graph.finished_nodes_}.\n \"\"\"\n return node_desc\n\n def update_status(self, finished_nodes):\n if self.status is None:\n # assume it's in progress...\n self.status = 'IP'\n else:\n if self.status == 'EC':\n return self.status\n if self.status == 'TL':\n return self.status\n if len(finished_nodes) == 0:\n return self.status\n for node in finished_nodes:\n if nx.is_isomorphic(node.local_graph.graph, self.local_graph.graph):\n self.status = 'CI'\n return self.status\n self.local_graph.finished_nodes()\n if self.local_graph.check_progress():\n if len(self.local_graph.unfinished_nodes()) == 0:\n print('Found a good finish.')\n self.status = 'TG'\n else:\n self.status = 'TB'\n \n for vertex in self.local_graph.graph.nodes:\n if not self.local_graph.check_neighborhood_status(vertex):\n self.status = 'TB'\n return self.status\n\n def set_status(self, status):\n self.status = status\n\n def expand(self, max_graph_order, max_children = 1000, resume = False, max_ID = None, time_limit = False):\n logging.debug(' Expanding...')\n if resume:\n global available_IDs\n remove_IDs = list(range(max_ID+1))\n # print(f'Need to remove IDs: {remove_IDs}')\n for i in remove_IDs:\n if i%500 == 0:\n logging.info(f'Removing ID {i}...')\n try:\n available_IDs.remove(i)\n except:\n logging.info(f\"{i} wasn't in the IDs. 
Moving on...\")\n self.local_graph.finished_nodes()\n next_vertex = get_max_degree_node(self.local_graph.graph, self.local_graph.unfinished_nodes())\n add_children = []\n logging.debug(' Getting the children...')\n new_children = self.get_children(next_vertex, max_graph_order, max_children, time_limit)\n logging.debug(f'checking {len(new_children)} children')\n for i in range(len(new_children)):\n add = True\n next_graph = new_children[i]\n for j in range(i+1, len(new_children)):\n check_graph = new_children[j]\n if nx.is_isomorphic(check_graph.local_graph.graph, next_graph.local_graph.graph):\n add = False\n if add:\n add_children.append(next_graph)\n self.children += add_children\n logging.debug(' Done expanding.')\n \n def reset_children(self):\n self.children = []\n\n def compare_node_lists(self, old, new):\n good = []\n i = 0\n for new_ in new:\n i +=1\n new_graph = True\n for old_ in old:\n if subgraph_check(old_.local_graph.graph, new_.local_graph.graph):\n new_graph = False\n continue\n if new_graph:\n good.append(new_)\n return good\n\n def get_children(self, node, max_graph_order, max_children = 1000, time_limit = False):\n if time_limit:\n start = time.time()\n children = []\n more_nodes_possible = self.local_graph.subgraph.order() - len(self.local_graph.get_neighbors(node))\n for add_nodes in range(more_nodes_possible + 1):\n if time_limit:\n time_remaining = time_limit - (time.time() - start)\n if time_remaining < 0:\n return children\n logging.debug(f' Getting nodes for adding {add_nodes} vertices; {time_remaining} seconds remaining.')\n self_copy = deepcopy(self)\n time_remaining = time_remaining if time_limit else time_limit\n possible_finishes = self_copy.local_graph.all_finishes(node, add_nodes, time_remaining)\n logging.debug(f' Got {len(possible_finishes)} possible finishes')\n num_checked = 1\n for pf in possible_finishes:\n child_ID = available_IDs.pop(0)\n next_graph = deepcopy(self_copy.local_graph)\n next_graph.apply_finish(pf)\n if next_graph.graph.order() > max_graph_order:\n next_node = Node(next_graph, children = [], status = 'TL', ID = child_ID)\n children.append(next_node)\n continue\n next_node = Node(next_graph, children = [], status = None, ID = child_ID)\n children.append(next_node)\n num_checked +=1 \n if len(children) > max_children:\n return children\n return children\n\n", "repo_name": "ehawb/diss", "sub_path": "TZ_tree_search/tree_search_utils/tree_search.py", "file_name": "tree_search.py", "file_ext": "py", "file_size_in_byte": 6098, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "94", "api": [{"api_name": "networkx.is_isomorphic", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 96, "usage_type": "call"}, {"api_name": "networkx.is_isomorphic", "line_number": 102, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 107, "usage_type": "call"}, {"api_name": "gurobi_utils.gurobi_subgraphs.basic_subgraph_check_result", "line_number": 119, "usage_type": "call"}, {"api_name": "time.time", "line_number": 128, "usage_type": "call"}, {"api_name": "time.time", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 136, "usage_type": 
"call"}, {"api_name": "copy.deepcopy", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 140, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "33839564920", "text": "#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\n# For Kaggle TPU VM\n# !pip install transformers\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\nprint('Import')\n\nimport os\nimport sys\nfrom argparse import ArgumentParser\nimport numpy as np\nimport tensorflow as tf\nprint('tf:', tf.__version__)\nimport transformers as tr\nprint('tr:', tr.__version__)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\nparser = ArgumentParser()\n\nparser.add_argument('--model_dir_or_name', default='Salesforce/codet5-base', type=str, help='Model directory or name')\nparser.add_argument('--data_tfrec_dir', default='/kaggle/input/ai4code-tfrec', type=str, help='Data directory with TFRecord files')\nparser.add_argument('--tpu', default='local', type=str, help='TPU GRPC/name, or local, or None')\nparser.add_argument('--job', default='train', type=str, help='Job to perform')\nparser.add_argument('--metric_name', default='mse', type=str, help='Metric name')\nparser.add_argument('--monitor', default='val_loss', type=str, help='Value to monitor')\nparser.add_argument('--monitor_mode', default='min', type=str, help='Monitor mode')\nparser.add_argument('--n_folds', default=5, type=int, help='Number of folds')\nparser.add_argument('--initial_fold', default=0, type=int, help='Initial fold (from 0)')\nparser.add_argument('--final_fold', default=2, type=int, help='Final fold. To train single fold set `initial_fold + 1`')\nparser.add_argument('--dim', default=1024, type=int, help='Max seq len')\nparser.add_argument('--n_examples_total', default=2_166_064, type=int, help='Total number of training examples')\nparser.add_argument('--n_epochs', default=50, type=int, help='Number of epochs to train (hard limit). Early stop is applied')\nparser.add_argument('--batch_size', default=64, type=int, help='Batch size')\nparser.add_argument('--lr', default=0.000_025, type=float, help='Learning rate')\n\nargs = parser.parse_args()\n# args = parser.parse_args([]) # to run in a notebook cell with default values\n\n# Number of sub-train examples i.e. all folds except one (e.g. 4/5 of full train)\nargs.n_examples_train = args.n_examples_total - (args.n_examples_total // args.n_folds)\nprint('Settings')\nfor a in sorted([a for a in vars(args) if '__' not in a]): print('%-20s %s' % (a, vars(args)[a]))\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\ndef init_accel(tpu=None):\n \"\"\"\n Seamlessly init any accelerator: CPU, GPU, multi-GPU, TPU\n\n Parameters:\n tpu : str or None\n TPU node GRPC or name \n E.g. 
'grpc://10.70.50.202:8470' or 'node-1' or 'local'\n\n Returns:\n strategy : \n Strategy\n \"\"\"\n try:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect(tpu=tpu)\n strategy = tf.distribute.TPUStrategy(tpu)\n except tf.errors.NotFoundError:\n strategy = tf.distribute.MirroredStrategy()\n print('TPU was not found')\n print('Num replicas:', strategy.num_replicas_in_sync)\n return strategy\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\ndef init_tfdata(files_glob, deterministic=True, batch_size=32, auto=-1, \n parse_example=None, repeat=False, buffer_size=None, \n cache=False, drop_remainder=False):\n \"\"\"\n Creates tf.data.TFRecordDataset with appropriate parameters\n\n Parameters:\n files_glob : str\n Glob wildcard for TFRecord files\n deterministic : bool\n batch_size : int\n auto : int\n Number of parallel reads/calls. -1 means automatic\n parse_example : callable\n Processing function\n repeat : bool\n Whether to repeat dataset\n buffer_size : int or None\n Shuffle buffer size. None means do not shuffle.\n cache : bool\n Whether to cache data\n drop_remainder : bool\n Whether to drop remainder\n\n Returns:\n ds : \n Initialized dataset\n \"\"\"\n options = tf.data.Options()\n options.experimental_deterministic = deterministic\n files = tf.data.Dataset.list_files(files_glob, shuffle=not deterministic).with_options(options)\n print('N tfrec files:', len(files))\n ds = tf.data.TFRecordDataset(files, num_parallel_reads=auto)\n ds = ds.with_options(options)\n ds = ds.map(parse_example, num_parallel_calls=auto)\n if repeat:\n ds = ds.repeat()\n if buffer_size:\n ds = ds.shuffle(buffer_size=buffer_size, reshuffle_each_iteration=True)\n ds = ds.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n ds = ds.prefetch(auto)\n if cache:\n ds = ds.cache()\n return ds\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\nclass KeepLastCKPT(tf.keras.callbacks.Callback):\n \"\"\"\n Sorts and removes all ckpt except the last.\n Parameters:\n wildcard : str\n Wildcard for weight file names\n \"\"\"\n #\n def __init__(self, wildcard):\n super(KeepLastCKPT, self).__init__()\n self.wildcard = wildcard\n #\n def on_epoch_begin(self, epoch, logs=None):\n files = sorted(tf.io.gfile.glob(self.wildcard))\n if len(files):\n for file in files[:-1]:\n tf.io.gfile.remove(file)\n print('Kept ckpt: %s' % files[-1])\n else:\n print('No ckpt to keep')\n #\n def on_train_end(self, logs=None):\n files = sorted(tf.io.gfile.glob(self.wildcard))\n if len(files):\n for file in files[:-1]:\n tf.io.gfile.remove(file)\n print('\\nKept ckpt (final): %s' % files[-1])\n else:\n print('\\nNo ckpt to keep (final)')\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\nfeature_description = {\n 'image': tf.io.FixedLenFeature([args.dim], tf.int64),\n 'label': tf.io.FixedLenFeature([], tf.float32),\n}\n\n\ndef parse_example(example_proto):\n \"\"\"\n Parse TFRec example\n \"\"\"\n d = tf.io.parse_single_example(example_proto, feature_description)\n image = tf.cast(d['image'], tf.int32)\n label = tf.cast(d['label'], tf.float32)\n return image, 
label\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\ndef init_model(print_summary=True, from_pretrained=True):\n \"\"\"\n Init model with pretrained or random weights\n\n Parameters:\n print_summary : bool\n Whether to print model summary\n from_pretrained : bool\n Init model with petrained/random weights\n\n Returns:\n model : \n Initialized model\n \"\"\"\n if from_pretrained:\n transformer = tr.TFT5EncoderModel.from_pretrained(\n args.model_dir_or_name, from_pt=True)\n else:\n config = tr.AutoConfig.from_pretrained(args.model_dir_or_name)\n transformer = tr.TFT5EncoderModel.from_config(config)\n input_ids = tf.keras.layers.Input(shape=(args.dim,), dtype=tf.int32)\n sequence_output = transformer(input_ids)[0] # (batch, len, hidden)\n cls_token = sequence_output[:, 0, :] # (batch, hidden)\n out = tf.keras.layers.Dense(1, activation='linear')(cls_token)\n model = tf.keras.models.Model(inputs=input_ids, outputs=out)\n model.compile(optimizer=tf.keras.optimizers.Adam(args.lr), \n loss=tf.keras.losses.MeanAbsoluteError(), \n metrics=[args.metric_name])\n if print_summary:\n model.summary()\n return model\n\n#------------------------------------------------------------------------------\n# Run\n#------------------------------------------------------------------------------\n\nfor fold_id in range(args.initial_fold, args.final_fold):\n print('\\n*****')\n print('Fold:', fold_id)\n print('*****\\n')\n print('Clear session...')\n tf.keras.backend.clear_session()\n print('FULL BATCH SHAPE: %d x %d' % (args.batch_size, args.dim,))\n print('LR: %.8f' % args.lr)\n print('Init TPU')\n strategy = init_accel(args.tpu)\n #----------------------------------------------------------------------\n # Globs\n all_fold_ids = np.array(range(args.n_folds))\n train_fold_ids = all_fold_ids[all_fold_ids != fold_id]\n train_glob = os.path.join(args.data_tfrec_dir, ('fold.[' + '%d'*(args.n_folds-1) + '].tfrecord*') % tuple(train_fold_ids))\n val_glob = os.path.join(args.data_tfrec_dir, 'fold.[%d].tfrecord*' % fold_id)\n print('TRAIN GLOB:', train_glob)\n print('VAL GLOB:', val_glob)\n #----------------------------------------------------------------------\n print('Init datasets')\n train_ds = init_tfdata(train_glob, \n deterministic=False, \n batch_size=args.batch_size, \n auto=-1,\n parse_example=parse_example, \n repeat=True,\n buffer_size=2048, \n drop_remainder=False,\n cache=False)\n val_ds = init_tfdata(val_glob, \n deterministic=True, \n batch_size=args.batch_size * 2, \n auto=-1,\n parse_example=parse_example,\n repeat=False, \n buffer_size=None,\n drop_remainder=False,\n cache=False)\n #----------------------------------------------------------------------\n print('Init model')\n with strategy.scope():\n model = init_model(print_summary=True, from_pretrained='train' in args.job)\n #----------------------------------------------------------------------\n print('Init callbacks')\n call_ckpt = tf.keras.callbacks.ModelCheckpoint('model-f%d-e{epoch:03d}-{val_loss:.4f}-{val_%s:.4f}.h5' % (fold_id, args.metric_name),\n monitor=args.monitor,\n save_best_only=True,\n save_weights_only=True,\n mode=args.monitor_mode,\n verbose=1)\n call_reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor=args.monitor,\n factor=0.5, \n patience=2, \n min_delta=1e-4,\n min_lr=1e-8,\n verbose=1,\n mode=args.monitor_mode)\n call_early_stop = tf.keras.callbacks.EarlyStopping(monitor=args.monitor,\n patience=4,\n 
min_delta=1e-4,\n mode=args.monitor_mode,\n verbose=1)\n call_keep_last = KeepLastCKPT(wildcard='model-f%d-e*.h5' % fold_id)\n #----------------------------------------------------------------------\n if 'train' in args.job:\n print('Fit (fold %d)' % fold_id)\n h = model.fit(\n train_ds,\n steps_per_epoch=args.n_examples_train // args.batch_size,\n epochs=args.n_epochs,\n initial_epoch=0,\n validation_data=val_ds,\n callbacks=[call_ckpt,\n call_reduce_lr,\n call_early_stop,\n call_keep_last,])\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\n\n\n\n", "repo_name": "vecxoz/ai4code", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 12791, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "tensorflow.__version__", "line_number": 17, "usage_type": "attribute"}, {"api_name": "transformers.__version__", "line_number": 19, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.distribute.cluster_resolver.TPUClusterResolver.connect", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.distribute", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.distribute.TPUStrategy", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.distribute", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.errors", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.distribute.MirroredStrategy", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.distribute", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Options", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.list_files", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tensorflow.data.TFRecordDataset", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.glob", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.remove", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.glob", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 148, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.remove", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 151, "usage_type": "attribute"}, {"api_name": "tensorflow.io.FixedLenFeature", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 160, "usage_type": "attribute"}, {"api_name": "tensorflow.int64", "line_number": 160, "usage_type": "attribute"}, {"api_name": "tensorflow.io.FixedLenFeature", "line_number": 161, "usage_type": 
"call"}, {"api_name": "tensorflow.io", "line_number": 161, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 161, "usage_type": "attribute"}, {"api_name": "tensorflow.io.parse_single_example", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 169, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 170, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 171, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 171, "usage_type": "attribute"}, {"api_name": "transformers.TFT5EncoderModel.from_pretrained", "line_number": 192, "usage_type": "call"}, {"api_name": "transformers.TFT5EncoderModel", "line_number": 192, "usage_type": "attribute"}, {"api_name": "transformers.AutoConfig.from_pretrained", "line_number": 195, "usage_type": "call"}, {"api_name": "transformers.AutoConfig", "line_number": 195, "usage_type": "attribute"}, {"api_name": "transformers.TFT5EncoderModel.from_config", "line_number": 196, "usage_type": "call"}, {"api_name": "transformers.TFT5EncoderModel", "line_number": 196, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 197, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 197, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 201, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 202, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 202, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.MeanAbsoluteError", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 203, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.clear_session", "line_number": 218, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 218, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 257, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 257, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.ReduceLROnPlateau", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 263, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.EarlyStopping", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 270, "usage_type": "attribute"}]} +{"seq_id": "17204007957", "text": "from app.centrality import Centrality\nimport numpy as np\nfrom numpy import unravel_index\nimport copy\nimport gmatch4py as gm\nimport networkx as nx\nfrom fuzzywuzzy import fuzz\nimport segeval\nimport requests\nfrom bs4 import BeautifulSoup, 
SoupStrainer\nimport bs4\n\nclass Aifsim:\n @staticmethod\n def get_graph(aif_id, cent):\n dir_path = 'http://www.aifdb.org/json/' + str(aif_id)\n graph, json = cent.get_graph_url(dir_path)\n return graph, json\n @staticmethod\n def is_iat(g, g1, centra):\n l_nodes = centra.get_l_node_list(g)\n l1_nodes = centra.get_l_node_list(g1)\n\n if len(l_nodes) < 1 and len(l1_nodes) < 1:\n return 'aif'\n elif len(l_nodes) > 1 and len(l1_nodes) > 1:\n return 'iat'\n else:\n return 'diff'\n\n @staticmethod\n def get_normalized_edit_distance(g1, g2, label_equal, attr_name):\n if label_equal:\n dist = nx.algorithms.similarity.optimize_graph_edit_distance(g1, g2, node_match=lambda a,b: a[attr_name] == b[attr_name])\n else:\n dist = nx.algorithms.similarity.optimize_graph_edit_distance(g1, g2)\n\n max_g_len = max(len(g1.nodes),len(g2.nodes))\n ed_dist = min(list(dist))\n\n\n norm_ed_dist = (max_g_len - ed_dist) / max_g_len\n\n return norm_ed_dist\n\n\n @staticmethod\n def get_normalized_path_edit_distance(g1, g2, label_equal, attr_name):\n if label_equal:\n dist = nx.algorithms.similarity.optimize_edit_paths(g1, g2, node_match=lambda a,b: a[attr_name] == b[attr_name])\n else:\n dist = nx.algorithms.similarity.optimize_edit_paths(g1, g2)\n\n max_g_len = max(len(g1.nodes),len(g2.nodes))\n ed_dist = min(list(dist))\n\n\n norm_ed_dist = (max_g_len - ed_dist) / max_g_len\n\n return norm_ed_dist\n\n\n @staticmethod\n def get_s_nodes(g):\n s_nodes = [x for x,y in g.nodes(data=True) if y['type']=='RA' or y['type']=='CA' or y['type']=='MA' or y['type']=='PA']\n not_s_nodes = [x for x,y in g.nodes(data=True) if y['type']!='RA' and y['type']!='CA' and y['type']!='MA' and y['type']!='PA']\n return s_nodes, not_s_nodes\n\n\n @staticmethod\n def get_i_s_nodes(g):\n i_s_nodes = [x for x,y in g.nodes(data=True) if y['type']=='I' or y['type']=='RA' or y['type']=='CA' or y['type']=='MA' or y['type']=='PA']\n not_i_s_nodes = [x for x,y in g.nodes(data=True) if y['type']!='I' and y['type']!='RA' and y['type']!='CA' and y['type']!='MA' and y['type']!='PA']\n return i_s_nodes, not_i_s_nodes\n\n\n @staticmethod\n def get_l_nodes(g):\n l_nodes = [x for x,y in g.nodes(data=True) if y['type']=='L']\n not_l_nodes = [x for x,y in g.nodes(data=True) if y['type']!='L']\n return l_nodes, not_l_nodes\n\n\n @staticmethod\n def get_l_ta_nodes(g):\n l_ta_nodes = [x for x,y in g.nodes(data=True) if y['type']=='L' or y['type']=='TA']\n not_l_ta_nodes = [x for x,y in g.nodes(data=True) if y['type']!='L' and y['type']!='TA']\n return l_ta_nodes, not_l_ta_nodes\n\n\n @staticmethod\n def get_i_nodes(g):\n i_nodes = [x for x,y in g.nodes(data=True) if y['type']=='I']\n not_i_nodes = [x for x,y in g.nodes(data=True) if y['type']!='I']\n return i_nodes, not_i_nodes\n\n\n @staticmethod\n def get_ya_nodes(g):\n ya_nodes = [x for x,y in g.nodes(data=True) if y['type']=='YA']\n not_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']!='YA']\n return ya_nodes, not_ya_nodes\n\n\n @staticmethod\n def get_ta_nodes(g):\n ta_nodes = [x for x,y in g.nodes(data=True) if y['type']=='TA']\n not_ta_nodes = [x for x,y in g.nodes(data=True) if y['type']!='TA']\n return ta_nodes, not_ta_nodes\n\n\n @staticmethod\n def get_l_ya_nodes(g):\n l_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']=='L' or y['type']=='YA']\n not_l_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']!='L' and y['type']!='YA']\n return l_ya_nodes, not_l_ya_nodes\n\n\n @staticmethod\n def get_l_i_ya_nodes(g):\n l_i_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']=='L' or 
y['type']=='YA' or y['type']=='I']\n not_l_i_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']!='L' and y['type']!='YA' and y['type']!='I']\n return l_i_ya_nodes, not_l_i_ya_nodes\n\n\n @staticmethod\n def get_l_ta_ya_nodes(g):\n l_ta_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']=='L' or y['type']=='YA' or y['type']=='TA']\n not_l_ta_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']!='L' and y['type']!='YA' and y['type']!='TA']\n return l_ta_ya_nodes, not_l_ta_ya_nodes\n\n\n @staticmethod\n def get_i_s_ya_nodes(g):\n i_s_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']=='I' or y['type']=='RA' or y['type']=='CA' or y['type']=='MA' or y['type']=='PA' or y['type']=='YA']\n not_i_s_ya_nodes = [x for x,y in g.nodes(data=True) if y['type']!='I' and y['type']!='RA' and y['type']!='CA' and y['type']!='MA' and y['type']!='PA' and y['type']!='YA']\n return i_s_ya_nodes, not_i_s_ya_nodes\n\n\n @staticmethod\n def remove_nodes(graph, remove_list):\n graph.remove_nodes_from(remove_list)\n return graph\n\n\n @staticmethod\n def get_i_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n g1_i_nodes, g1_not_i_nodes = aifsim.get_i_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_i_nodes)\n g2_i_nodes, g2_not_i_nodes = aifsim.get_i_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_i_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, False, '')\n return ed\n\n\n @staticmethod\n def get_s_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n g1_nodes, g1_not_nodes = aifsim.get_s_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_s_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, False, '')\n return ed\n\n\n @staticmethod\n def get_i_s_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n g1_nodes, g1_not_nodes = aifsim.get_i_s_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_i_s_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, True, 'type')\n return ed\n\n\n @staticmethod\n def get_l_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n g1_nodes, g1_not_nodes = aifsim.get_l_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_l_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, False, 'type')\n return ed\n\n\n @staticmethod\n def get_l_ta_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n g1_nodes, g1_not_nodes = aifsim.get_l_ta_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_l_ta_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, False, 'type')\n return ed\n\n\n @staticmethod\n def get_ya_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n\n g1_nodes, g1_not_nodes = aifsim.get_ya_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_ya_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, True, 'text')\n return ed\n\n\n @staticmethod\n def get_ya_l_node_sim(g1, g2):\n g1_c = g1.copy()\n 
g2_c = g2.copy()\n aifsim = Aifsim()\n\n g1_nodes, g1_not_nodes = aifsim.get_l_ya_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_l_ya_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, True, 'text')\n return ed\n\n\n @staticmethod\n def get_ta_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n\n g1_nodes, g1_not_nodes = aifsim.get_ta_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_ta_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, True, 'text')\n return ed\n\n\n @staticmethod\n def get_ya_l_i_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n\n g1_nodes, g1_not_nodes = aifsim.get_l_i_ya_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_l_i_ya_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, True, 'text')\n return ed\n\n\n @staticmethod\n def get_l_ta_ya_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n\n g1_nodes, g1_not_nodes = aifsim.get_l_ta_ya_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_l_ta_ya_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, False, 'type')\n return ed\n\n\n @staticmethod\n def get_i_s_ya_node_sim(g1, g2):\n g1_c = g1.copy()\n g2_c = g2.copy()\n aifsim = Aifsim()\n\n g1_nodes, g1_not_nodes = aifsim.get_i_s_ya_nodes(g1_c)\n new_g1 = aifsim.remove_nodes(g1_c, g1_not_nodes)\n g2_nodes, g2_not_nodes = aifsim.get_i_s_ya_nodes(g2_c)\n new_g2 = aifsim.remove_nodes(g2_c, g2_not_nodes)\n ed = aifsim.get_normalized_gm_edit_distance(new_g1, new_g2, True, 'type')\n return ed\n\n\n @staticmethod\n def findMean(a, N):\n\n summ = 0\n\n # total sum calculation of matrix\n for i in range(N):\n for j in range(N):\n summ += a[i][j]\n\n return summ/(N*N)\n\n\n @staticmethod\n def get_normalized_gm_edit_distance(g1, g2, label_equal, attr_name):\n ged=gm.GraphEditDistance(1,1,1,1)\n\n if label_equal:\n ged.set_attr_graph_used(attr_name, None)\n result=ged.compare([g1,g2],None)\n else:\n result=ged.compare([g1,g2],None)\n\n sim = ged.similarity(result)\n flat_sim = sim.flatten()\n flat_sim = flat_sim[flat_sim!=0]\n #print(flat_sim)\n norm_ed_dist = min(flat_sim)\n #norm_ed_dist = findMean(sim, 2)\n #norm_ed_dist = (sim[0][1] + sim[1][0])/2\n #print(sim)\n return norm_ed_dist\n\n\n @staticmethod\n def call_diagram_parts_and_sum(g_copy, g1_copy, rep):\n aifsim = Aifsim()\n if rep == 'aif':\n i_sim = aifsim.get_i_node_sim(g_copy, g1_copy)\n s_sim = aifsim.get_s_node_sim(g_copy, g1_copy)\n i_s_sim = aifsim.get_i_s_node_sim(g_copy, g1_copy)\n sum_list = [i_sim, s_sim, i_s_sim]\n else:\n i_sim = aifsim.get_i_node_sim(g_copy, g1_copy)\n s_sim = aifsim.get_s_node_sim(g_copy, g1_copy)\n i_s_sim = aifsim.get_i_s_node_sim(g_copy, g1_copy)\n\n i_s_ya_sim = aifsim.get_i_s_ya_node_sim(g_copy, g1_copy)\n l_sim = aifsim.get_l_node_sim(g_copy, g1_copy)\n l_ta_sim = aifsim.get_l_ta_node_sim(g_copy, g1_copy)\n ya_sim = aifsim.get_ya_node_sim(g_copy, g1_copy)\n ta_sim = aifsim.get_ta_node_sim(g_copy, g1_copy)\n l_i_ya_sim = aifsim.get_ya_l_i_node_sim(g_copy, g1_copy)\n l_ta_ya_sim = aifsim.get_l_ta_ya_node_sim(g_copy, 
g1_copy)\n l_ta_ya_sim = aifsim.get_ya_l_node_sim(g_copy, g1_copy)\n sum_list = [i_sim, s_sim, i_s_sim, i_s_ya_sim, l_sim, l_ta_sim, ya_sim, ta_sim, l_i_ya_sim, l_ta_ya_sim, l_ta_ya_sim]\n sum_tot = sum(sum_list)\n tot = sum_tot/len(sum_list)\n sum_list = np.asarray(sum_list)\n harm = len(sum_list) / np.sum(1.0/sum_list)\n return tot\n\n\n\n @staticmethod\n def text_sim_matrix(g_list, g1_list):\n aifsim = Aifsim()\n g_size = len(g_list)\n g1_size = len(g1_list)\n\n switch_flag = False\n\n\n if g_size >= g1_size:\n mat = aifsim.loop_nodes(g_list, g1_list)\n rels, vals = aifsim.select_max_vals(mat, g1_size, g_list, g1_list)\n else:\n switch_flag = True\n mat = aifsim.loop_nodes(g1_list, g_list)\n rels, vals = aifsim.select_max_vals(mat, g_size, g1_list, g_list)\n\n return rels, vals, switch_flag\n\n\n @staticmethod\n def loop_nodes(g_list, g1_list):\n matrix = np.zeros((len(g_list), len(g1_list)))\n for i, node in enumerate(g_list):\n text = node[1]\n text = text.lower()\n for i1, node1 in enumerate(g1_list):\n\n text1 = node1[1]\n text1 = text1.lower()\n #lev_val = normalized_levenshtein.distance(text, text1)\n lev_val = (fuzz.ratio(text, text1))/100\n matrix[i][i1] = lev_val\n\n return matrix\n\n\n\n\n @staticmethod\n def select_max_vals(matrix, smallest_value, g_list, g1_list):\n counter = 0\n lev_vals = []\n lev_rels = []\n index_list = list(range(len(g_list)))\n m_copy = copy.deepcopy(matrix)\n while counter <= smallest_value - 1:\n index_tup = unravel_index(m_copy.argmax(), m_copy.shape)\n #matrix[index_tup[0]][index_tup[1]] = -9999999\n m_copy[index_tup[0]] = 0 # zeroes out row i\n m_copy[:,index_tup[1]] = 0 # zeroes out column i\n lev_rels.append((g_list[index_tup[0]],g1_list[index_tup[1]]))\n lev_vals.append(matrix[index_tup[0]][index_tup[1]])\n index_list.remove(index_tup[0])\n counter = counter + 1\n for vals in index_list:\n lev_rels.append((g_list[vals],(0,'')))\n lev_vals.append(0)\n return lev_rels, lev_vals\n\n\n @staticmethod\n def get_mean_of_list(a):\n val_tot = sum(a)\n tot = val_tot/len(a)\n return tot\n\n\n @staticmethod\n def get_l_i_mean(l, i):\n return (l+i)/2\n\n\n @staticmethod\n def get_graph_sim(aif_id1, aif_id2):\n centra = Centrality()\n aifsim = Aifsim()\n graph, json = aifsim.get_graph(aif_id1, centra)\n graph1, json1 = aifsim.get_graph(aif_id2, centra)\n graph = centra.remove_iso_analyst_nodes(graph)\n graph1 = centra.remove_iso_analyst_nodes(graph1)\n rep_form = aifsim.is_iat(graph, graph1, centra)\n g_copy = graph.copy()\n g1_copy = graph1.copy()\n graph_mean = 0\n text_mean = 0\n overall_mean = 0\n if rep_form == 'diff':\n return 'Error'\n else:\n graph_mean = aifsim.call_diagram_parts_and_sum(g_copy, g1_copy, rep_form)\n if rep_form == 'aif':\n g_inodes = centra.get_i_node_list(g_copy)\n g1_inodes = centra.get_i_node_list(g1_copy)\n relsi, valsi, switched = aifsim.text_sim_matrix(g_inodes, g1_inodes)\n i_mean = aifsim.get_mean_of_list(valsi)\n text_mean = i_mean\n else:\n g_inodes = centra.get_i_node_list(g_copy)\n g1_inodes = centra.get_i_node_list(g1_copy)\n g_lnodes = centra.get_l_node_list(g_copy)\n g1_lnodes = centra.get_l_node_list(g1_copy)\n relsi, valsi, switched = aifsim.text_sim_matrix(g_inodes, g1_inodes)\n relsl, valsl, switched = aifsim.text_sim_matrix(g_lnodes, g1_lnodes)\n i_mean = aifsim.get_mean_of_list(valsi)\n l_mean = aifsim.get_mean_of_list(valsl)\n text_mean = aifsim.get_l_i_mean(l_mean, i_mean)\n\n overall_score = aifsim.get_l_i_mean(text_mean, graph_mean)\n return overall_score, text_mean, graph_mean\n\n\n @staticmethod\n def 
remove_html_tags(xml_soup):\n for match in xml_soup.findAll('div'):\n match.replaceWithChildren()\n for match in xml_soup.findAll('p'):\n match.replaceWithChildren()\n for match in xml_soup.findAll('br'):\n match.replaceWithChildren()\n\n return xml_soup\n\n @staticmethod\n def get_segements(xml_soup):\n # one entry per word, labelled with the 1-based index of the segment the word belongs to\n segment_list = []\n for i, tag in enumerate(xml_soup):\n boundary_counter = i + 1\n tag_text = ''\n if 'span' in str(tag):\n tag_text = tag.text\n else:\n tag_text = str(tag)\n\n words = tag_text.split()\n seg_len = len(words)\n segment_list += seg_len * [boundary_counter]\n return segment_list\n\n @staticmethod\n def check_segment_length(seg_1, seg_2):\n return len(seg_1) == len(seg_2)\n\n @staticmethod\n def get_similarity(text_1, text_2):\n # text_1 and text_2 are xml strings that use spans to separate boundaries,\n # e.g. BOSTON, MA ... <span class=\"highlighted\" id=\"634541\">Steven L.\n # Davis pled guilty yesterday to federal charges that he stole and disclosed trade secrets of The Gillette Company</span>.\n\n if text_1 == '' or text_2 == '':\n return 'Error Text Input Is Empty'\n else:\n xml_soup_1 = BeautifulSoup(text_1, 'html.parser')\n xml_soup_2 = BeautifulSoup(text_2, 'html.parser')\n xml_soup_1 = Aifsim.remove_html_tags(xml_soup_1)\n xml_soup_2 = Aifsim.remove_html_tags(xml_soup_2)\n\n segements_1 = Aifsim.get_segements(xml_soup_1)\n segements_2 = Aifsim.get_segements(xml_soup_2)\n\n seg_check = Aifsim.check_segment_length(segements_1, segements_2)\n\n if not seg_check:\n return 'Error Source Text Was Different'\n\n masses_1 = segeval.convert_positions_to_masses(segements_1)\n masses_2 = segeval.convert_positions_to_masses(segements_2)\n\n ss = segeval.segmentation_similarity(masses_1, masses_2)\n ss = float(ss)\n pk = segeval.pk(masses_1, masses_2)\n pk = 1 - float(pk)\n win_diff = segeval.window_diff(masses_1, masses_2)\n win_diff = 1 - float(win_diff)\n\n return str(ss), str(pk), str(win_diff)\n\n @staticmethod\n def rels_to_dict(rels, switched):\n new_list = []\n for rel in rels:\n id_1 = rel[0][0]\n id_2 = rel[1][0]\n text_1 = rel[0][1]\n text_2 = rel[1][1]\n\n if switched:\n mat_dict = {'ID1': id_2, 'ID2': id_1, 'text1': text_2, 'text2': text_1}\n else:\n mat_dict = {'ID1': id_1, 'ID2': id_2, 'text1': text_1, 'text2': text_2}\n new_list.append(mat_dict)\n return new_list\n\n @staticmethod\n def get_loc_sim_matrix(graph, graph1):\n centra = Centrality()\n aifsim = Aifsim()\n\n g_copy = graph.copy()\n g1_copy = graph1.copy()\n\n g_lnodes = centra.get_l_node_list(g_copy)\n g1_lnodes = centra.get_l_node_list(g1_copy)\n relsl, valsl, switched = aifsim.text_sim_matrix(g_lnodes, g1_lnodes)\n\n rels_dict = aifsim.rels_to_dict(relsl, switched)\n\n return rels_dict\n\n @staticmethod\n def get_prop_sim_matrix(graph, graph1):\n centra = Centrality()\n aifsim = Aifsim()\n\n g_copy = graph.copy()\n g1_copy = graph1.copy()\n\n g_inodes = centra.get_i_node_list(g_copy)\n g1_inodes = centra.get_i_node_list(g1_copy)\n relsi, valsi, switched = aifsim.text_sim_matrix(g_inodes, g1_inodes)\n\n # if switched, the comparison order was swapped, so rels_to_dict reverses each pair when creating the dictionary\n\n rels_dict = aifsim.rels_to_dict(relsi, switched)\n\n return rels_dict\n
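\n @staticmethod\n def masses_from_xml(text):\n # convenience sketch, an illustrative addition rather than part of the original module:\n # converts one span-marked document into segeval masses by reusing the helpers above\n soup = BeautifulSoup(text, 'html.parser')\n soup = Aifsim.remove_html_tags(soup)\n return segeval.convert_positions_to_masses(Aifsim.get_segements(soup))\n", "repo_name": "roryduthie/CASS", "sub_path": "app/aifsim.py", "file_name": "aifsim.py", "file_ext": "py", "file_size_in_byte": 20431, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "97", "api": [{"api_name": 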
"networkx.algorithms.similarity.optimize_graph_edit_distance", "line_number": 34, "usage_type": "call"}, {"api_name": "networkx.algorithms", "line_number": 34, "usage_type": "attribute"}, {"api_name": "networkx.algorithms.similarity.optimize_graph_edit_distance", "line_number": 36, "usage_type": "call"}, {"api_name": "networkx.algorithms", "line_number": 36, "usage_type": "attribute"}, {"api_name": "networkx.algorithms.similarity.optimize_edit_paths", "line_number": 50, "usage_type": "call"}, {"api_name": "networkx.algorithms", "line_number": 50, "usage_type": "attribute"}, {"api_name": "networkx.algorithms.similarity.optimize_edit_paths", "line_number": 52, "usage_type": "call"}, {"api_name": "networkx.algorithms", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gmatch4py.GraphEditDistance", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 381, "usage_type": "call"}, {"api_name": "fuzzywuzzy.fuzz.ratio", "line_number": 390, "usage_type": "call"}, {"api_name": "fuzzywuzzy.fuzz", "line_number": 390, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 406, "usage_type": "call"}, {"api_name": "app.centrality.Centrality", "line_number": 434, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 518, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 519, "usage_type": "call"}, {"api_name": "segeval.convert_positions_to_masses", "line_number": 531, "usage_type": "call"}, {"api_name": "segeval.convert_positions_to_masses", "line_number": 532, "usage_type": "call"}, {"api_name": "segeval.segmentation_similarity", "line_number": 534, "usage_type": "call"}, {"api_name": "segeval.pk", "line_number": 536, "usage_type": "call"}, {"api_name": "segeval.window_diff", "line_number": 538, "usage_type": "call"}, {"api_name": "app.centrality.Centrality", "line_number": 562, "usage_type": "call"}, {"api_name": "app.centrality.Centrality", "line_number": 580, "usage_type": "call"}]} +{"seq_id": "71673614390", "text": "# https://arxiv.org/pdf/1708.03080.pdf\n# https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&cad=rja&uact=8&ved=2ahUKEwjFlNOipOL1AhXnjokEHU6LAnUQFnoECA0QAw&url=https%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS2352146520304804%2Fpdf%3Fmd5%3D7efbb0aed0d1a2cdec7d2fb6d040f632%26pid%3D1-s2.0-S2352146520304804-main.pdf&usg=AOvVaw3A5ZPzSHEk03XlNIj-TR3C\n\nfrom math import *\nfrom typing import List, Sequence\nimport matplotlib.pyplot as plt\nfrom random import *\nimport numpy as np\nfrom PIL import Image\nimport heapq\nimport cv2\nimport os\nimport json\nimport cProfile\n\nAGENT_RADIUS = 0.2 # size of agent (m)\nAGENT_MAX_ANGLE_CHANGE = pi/2 # maximum agent direction change\nAGENT_DISTANCE_WEIGHT = 0.4 # lower = more likely to slow down\nAGENT_COMFORTABLE_SPACING = 1 # additional distance the agent likes to keep to others (m)\nAGENT_SPACING_SHIFT = 0.8 # agent shift away from others (per step) (m) (NOT as a function of step size)\nAGENT_STEP = 0.8 # step size of the agent (m)\nAGENT_SEATING_PENALTY = 5 # scalar for the agent's dislike for seats (1 implies no avoidance of seats)\nAGENT_PATH_LOOKAHEAD = 2 # number of steps ahead of current path location the agent tries to move to\nAGENT_STUCK_THRESHOLD = 6 # how quickly the agent decides it is stuck and 
changes to a more direct pathing\nAGENT_CROWD_THRESHOLD = 30 # how many people have to be present for the agent to decide \"this is a crowd\" and decrease movement checks\nAGENT_THICK_CROWD_THRESHOLD = 52 # how many people have to be present for the agent to ignore shift calculations\nEXIT_WIDTH = 2 # how close the people need to be to the exit to count as finished (path steps)\nWORLD_FILE = \"Stadium-map.png\"\nWORLD_EXTENTS = Image.open(WORLD_FILE).size\n\nCOLLISION_DIST_SQUARED = (2 * AGENT_RADIUS) * (2 * AGENT_RADIUS)\nAVOIDANCE_DIST_SQUARED = (2 * AGENT_RADIUS + AGENT_COMFORTABLE_SPACING) * (2 * AGENT_RADIUS + AGENT_COMFORTABLE_SPACING)\n\n# candidate moves: five headings spread across +/- AGENT_MAX_ANGLE_CHANGE, each at four step\n# fractions; candidates are ranked by a weighted sum of step length and straightness\nvalues = {}\narcs = []\nfor theta in range(0, 5):\n arcs.append((0.5, (theta-2) / 2 * AGENT_MAX_ANGLE_CHANGE))\n for a in range(1, 5):\n direction = (theta-2) / 2 * AGENT_MAX_ANGLE_CHANGE\n distance = a / 4\n value = AGENT_DISTANCE_WEIGHT * a + (1 - AGENT_DISTANCE_WEIGHT) * (1 - abs(direction) / AGENT_MAX_ANGLE_CHANGE)\n values[(distance, direction)] = value\n\nmoves = [x for x in values]\nmoves.sort(key=lambda x: values[x], reverse=True)\n\nclass Agent:\n x_pos = 0\n y_pos = 0\n desired_step_size = 1.0\n desired_location = (0, 0)\n collision_grid_pos = (0, 0)\n desired_direction = 0\n path = None\n finished = False\n stuck_factor = 0\n stay_put_timer = 0\n\n def __init__(self, pos, speed, goal, time):\n self.x_pos, self.y_pos = pos\n self.desired_step_size = speed\n self.desired_location = goal\n self.stay_put_timer = time\n self.compute_path()\n\n def compute_path(self):\n global world_collisions\n path = world_collisions.get_path((floor(self.x_pos), floor(self.y_pos)), (floor(self.desired_location[0]), floor(self.desired_location[1])))\n if path:\n if path == self.path:\n self.stuck_factor += 1\n else:\n self.path = path\n self.stuck_factor = 0\n\n def update(self):\n if self.stay_put_timer > 0:\n self.stay_put_timer -= AGENT_STEP/1.4 # one step lasts AGENT_STEP/1.4 seconds at an assumed 1.4 m/s walking speed\n return\n\n assert not self.finished\n\n global agent_collisions\n global world_collisions\n\n self.compute_path()\n\n if len(self.path) <= EXIT_WIDTH:\n agent_collisions.deregister_member(self)\n self.finished = True\n return\n\n lookahead = min(len(self.path)-1,AGENT_PATH_LOOKAHEAD)\n if self.stuck_factor >= AGENT_STUCK_THRESHOLD:\n lookahead = 1\n self.desired_direction = atan2(self.path[lookahead][1]+0.5 - self.y_pos, self.path[lookahead][0]+0.5 - self.x_pos)\n\n nearby_people = agent_collisions.get_all_collision_considerations(self.collision_grid_pos)\n\n if len(nearby_people) > AGENT_CROWD_THRESHOLD and self.stuck_factor >= 10*AGENT_STUCK_THRESHOLD:\n self.desired_location = world_collisions.random_goal()\n self.stuck_factor = 0\n self.update()\n return # the recursive call has already handled this step\n\n shift = self.shift_avoid_people(nearby_people)\n original_length = self.desired_step_size\n original_direction = self.desired_direction\n self.desired_step_size = shift[0]\n self.desired_direction = shift[1]\n\n move = self.find_next_move(nearby_people)\n self.x_pos, self.y_pos = self.next_position(move)\n agent_collisions.check_for_updates(self)\n\n self.desired_step_size = original_length\n self.desired_direction = original_direction\n\n def shift_avoid_people(self, people):\n # if len(people) > AGENT_THICK_CROWD_THRESHOLD:\n # return [self.desired_step_size, self.desired_direction]\n\n global agent_collisions\n people_pos_sum = [0, 0]\n n_people = 0\n for obstacle in people:\n if self != obstacle:\n dx = (self.x_pos - obstacle.x_pos)\n dy = (self.y_pos - obstacle.y_pos)\n if dx*dx + dy*dy < AVOIDANCE_DIST_SQUARED:\n people_pos_sum[0] += obstacle.x_pos\n 
people_pos_sum[1] += obstacle.y_pos\n n_people += 1\n if n_people > 0:\n people_center = [people_pos_sum[0] / n_people, people_pos_sum[1] / n_people]\n avoidance_vector = [self.x_pos - people_center[0], self.y_pos - people_center[1]]\n avoidance_dir = atan2(avoidance_vector[1], avoidance_vector[0])\n shift = [\n cos(avoidance_dir) * AGENT_SPACING_SHIFT,\n sin(avoidance_dir) * AGENT_SPACING_SHIFT\n ]\n optimal_vector = [\n cos(self.desired_direction) * self.desired_step_size,\n sin(self.desired_direction) * self.desired_step_size\n ]\n shifted_vector = [\n optimal_vector[0] + shift[0],\n optimal_vector[1] + shift[1]\n ]\n shifted_angle = atan2(shifted_vector[1], shifted_vector[0])\n shifted_length = sqrt(shifted_vector[0]**2 + shifted_vector[1]**2)\n shifted_length = max(min(shifted_length, self.desired_step_size), 0)\n # note: the clamped shifted_length is not applied below; only the heading changes and the step size is kept\n return [self.desired_step_size, shifted_angle]\n return [self.desired_step_size, self.desired_direction]\n\n def next_position(self, move):\n # the paper gives these with sin and cos swapped, which appears to be a typo, so the conventional form is used here\n x = move[0] * self.desired_step_size * cos(self.desired_direction + move[1])\n y = move[0] * self.desired_step_size * sin(self.desired_direction + move[1])\n return (self.x_pos + x, self.y_pos + y)\n\n def find_next_move(self, people):\n global moves\n global arcs\n illegal_directions = set()\n if len(people) >= AGENT_CROWD_THRESHOLD:\n for arc in arcs:\n n = self.next_position(arc)\n if not self.is_legal_move(n, people):\n illegal_directions.add(arc[1])\n\n best_selection = (0.0, 0.0)\n for move in moves:\n if move[1] in illegal_directions:\n continue\n n = self.next_position(move)\n if self.is_legal_move(n, people):\n best_selection = move\n break\n return best_selection\n\n def is_legal_move(self, n, people):\n global agent_collisions\n global world_collisions\n if not world_collisions.is_valid_location(n):\n return False\n for obstacle in people:\n if self != obstacle:\n dx = (n[0] - obstacle.x_pos)\n dy = (n[1] - obstacle.y_pos)\n if dx*dx + dy*dy < COLLISION_DIST_SQUARED:\n return False\n return True\n\nclass AgentCollisionManager:\n square_side = 1\n squares = []\n\n def __init__(self, x, y):\n self.squares = [[[] for _ in range(ceil(y/self.square_side)+1)] for _ in range(ceil(x/self.square_side)+1)]\n # the +1 adds a buffer row and column, avoiding out-of-bounds checks in get_all_collision_considerations\n\n def check_for_updates(self, member: Agent):\n coord = [floor(member.x_pos/self.square_side),floor(member.y_pos/self.square_side)]\n # re-register whenever either grid coordinate changes\n if coord[0] != member.collision_grid_pos[0] or coord[1] != member.collision_grid_pos[1]:\n self.squares[member.collision_grid_pos[0]][member.collision_grid_pos[1]].remove(member)\n self.squares[coord[0]][coord[1]].append(member)\n member.collision_grid_pos = coord\n\n def register_member(self, member: Agent):\n coord = [floor(member.x_pos/self.square_side),floor(member.y_pos/self.square_side)]\n self.squares[coord[0]][coord[1]].append(member)\n member.collision_grid_pos = coord\n\n def deregister_member(self, member: Agent):\n self.squares[member.collision_grid_pos[0]][member.collision_grid_pos[1]].remove(member)\n\n def point_in_square(self, pos, index):\n bounds = self.get_square_bounds(index)\n return pos[0] >= bounds[0] and pos[1] >= bounds[1] and pos[0] < bounds[2] and pos[1] < bounds[3]\n
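\n def neighborhood_count(self, index):\n # illustrative helper, an addition rather than part of the original file: counts the agents\n # in the 3x3 block of squares around index, mirroring get_all_collision_considerations\n # without building the intermediate list\n return sum(len(self.squares[index[0]+dx][index[1]+dy]) for dx in range(-1, 2) for dy in range(-1, 2))\n\n def get_square_bounds(self, index):\n min_x = index[0] * self.square_side\n min_y = index[1] * self.square_side\n max_x = (index[0] + 1) * self.square_side\n max_y = (index[1] + 1) * 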
self.square_side\n return [min_x, min_y, max_x, max_y]\n\n def get_all_collision_considerations(self, index):\n objects = []\n for dx in range(-1, 2):\n for dy in range(-1, 2):\n objects += self.squares[index[0]+dx][index[1]+dy]\n return objects\n\nclass WorldManager:\n illegal_pixels = set()\n seating_pixels = set()\n goals = []\n ideal_goals = {}\n paths = {}\n\n def __init__(self, image):\n arr = np.asarray(image)\n # map legend: black pixels are walls, red pixels are seating, green pixels are exits\n for x in range(len(arr[0])):\n for y in range(len(arr)):\n px = arr[(WORLD_EXTENTS[1]-1)-y, x]\n if px[0] == 0 and px[1] == 0 and px[2] == 0:\n self.illegal_pixels.add((float(x), float(y)))\n if px[0] == 255 and px[1] == 0 and px[2] == 0:\n self.seating_pixels.add((float(x), float(y)))\n if px[0] == 0 and px[1] == 255 and px[2] == 0:\n self.goals.append((x, y))\n self.compute_ideal_goals()\n\n def read_paths(self):\n if os.path.exists(WORLD_FILE[:-4] + \"-pathdata.json\"):\n f = open(WORLD_FILE[:-4] + \"-pathdata.json\", \"r\")\n self.paths = json.loads(f.read())\n f.close()\n else:\n self.precompute_paths()\n\n def precompute_paths(self):\n for goal in self.goals:\n print(\"computing paths to:\", goal)\n self.compute_paths(goal)\n print(\"done computing paths\")\n with open(WORLD_FILE[:-4] + \"-pathdata.json\", \"w\") as f:\n f.write(json.dumps(self.paths))\n\n def is_valid_location(self, point):\n x = floor(point[0])\n y = floor(point[1])\n return (x, y) not in self.illegal_pixels\n\n def is_seating_location(self, point):\n x = floor(point[0])\n y = floor(point[1])\n return (x, y) in self.seating_pixels\n\n def compute_ideal_goals(self):\n if os.path.exists(WORLD_FILE[:-4] + \"-exitdata.json\"):\n f = open(WORLD_FILE[:-4] + \"-exitdata.json\", \"r\")\n self.ideal_goals = json.loads(f.read())\n f.close()\n else:\n goal_preferences = [[] for _ in range(len(self.goals))]\n for i in range(len(self.goals)):\n for pos in self.seating_pixels:\n goal_preferences[i].append([(pos[0]-self.goals[i][0])**2 + (pos[1]-self.goals[i][1])**2, pos])\n goal_preferences[i].sort(key=lambda x: x[0])\n\n # greedy round-robin assignment: each exit, in turn, claims its nearest seat that is still unassigned\n assigned = set()\n goal_considered = [0 for _ in range(len(self.goals))]\n i = 0\n while len(assigned) < len(self.seating_pixels):\n while goal_preferences[i][goal_considered[i]][1] in assigned:\n goal_considered[i] += 1\n assigned.add(goal_preferences[i][goal_considered[i]][1])\n self.ideal_goals[str(goal_preferences[i][goal_considered[i]][1])] = self.goals[i]\n i = (i+1)%len(self.goals)\n\n with open(WORLD_FILE[:-4] + \"-exitdata.json\", \"w\") as f:\n f.write(json.dumps(self.ideal_goals))\n\n def smart_closest_goal(self, pos):\n return self.ideal_goals[str((float(floor(pos[0])),float(floor(pos[1]))))]\n\n def closest_goal(self, pos):\n best = 0\n distance = 1e100\n for i,g in enumerate(self.goals):\n if (g[0]-pos[0])**2 + (g[1]-pos[1])**2 < distance:\n distance = (g[0]-pos[0])**2 + (g[1]-pos[1])**2\n best = i\n return self.goals[best]\n\n def random_goal(self):\n return self.goals[randint(0,len(self.goals)-1)]\n\n def assign_goal(self, mode, pos):\n if mode == \"random\":\n return self.random_goal()\n elif mode == \"closest\":\n return self.closest_goal(pos)\n elif mode == \"smartclosest\":\n return self.smart_closest_goal(pos)\n\n def get_path(self, pos, goal):\n if str((pos[0],pos[1],goal[0],goal[1])) in self.paths:\n path = [pos]\n for _ in range(AGENT_PATH_LOOKAHEAD):\n p = self.paths[str((path[-1][0],path[-1][1],goal[0],goal[1]))]\n if p is None:\n break\n path.append(p)\n return path\n else:\n # no precomputed step for this position/goal pair; log it and fall through (implicitly returns None)\n print(str((pos[0],pos[1],goal[0],goal[1])))\n
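\n def full_path(self, pos, goal):\n # illustrative sketch, an addition rather than part of the original file: follows the\n # precomputed parent map all the way to the goal instead of only AGENT_PATH_LOOKAHEAD\n # steps; assumes read_paths() or precompute_paths() has already filled self.paths and\n # that pos is an integer grid coordinate\n path = [pos]\n while True:\n key = str((path[-1][0], path[-1][1], goal[0], goal[1]))\n if key not in self.paths or self.paths[key] is None:\n break\n path.append(self.paths[key])\n return path\n\n def compute_paths(self, goal):\n # 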
Dijkstra's algorithm\n # Computes every path to this goal\n frontier = [DijkstraNode(goal, None)]\n heapq.heapify(frontier)\n frontier_set = set(frontier)\n visited = set()\n\n while len(frontier) != 0:\n next_node = heapq.heappop(frontier)\n visited.add(next_node)\n frontier_set.remove(next_node)\n for neighbor in self.neighbors(next_node.pos):\n if not self.is_valid_location(neighbor):\n continue\n n = DijkstraNode(neighbor, next_node)\n if n in visited:\n continue\n if n not in frontier_set:\n frontier_set.add(n)\n heapq.heappush(frontier, n)\n else:\n for e in frontier_set:\n if e == n:\n if e > n:\n e.set_parent(next_node)\n # the node's cost just decreased in place, so restore the heap invariant\n heapq.heapify(frontier)\n break\n\n # store each node's next step toward the goal (its Dijkstra parent)\n for node in visited:\n if node.parent is not None:\n self.paths[str((node.pos[0], node.pos[1], goal[0], goal[1]))] = node.parent.pos\n else:\n self.paths[str((node.pos[0], node.pos[1], goal[0], goal[1]))] = None\n\n def neighbors(self, pos):\n return [\n (pos[0] - 1, pos[1] - 1),\n (pos[0], pos[1] - 1),\n (pos[0] + 1, pos[1] - 1),\n (pos[0] - 1, pos[1]),\n (pos[0] + 1, pos[1]),\n (pos[0] - 1, pos[1] + 1),\n (pos[0], pos[1] + 1),\n (pos[0] + 1, pos[1] + 1)\n ]\n\nclass DijkstraNode:\n pos = (0, 0)\n parent = None\n cost = 0\n\n def __init__(self, pos, parent):\n self.pos = pos\n self.parent = parent\n if self.parent is None:\n self.cost = 0\n else:\n self.set_cost()\n\n def set_parent(self, new_parent):\n self.parent = new_parent\n self.set_cost()\n\n def set_cost(self):\n step_cost = sqrt((self.pos[0]-self.parent.pos[0])**2 + (self.pos[1]-self.parent.pos[1])**2)\n if world_collisions.is_seating_location(self.pos):\n step_cost *= AGENT_SEATING_PENALTY\n self.cost = self.parent.cost + step_cost\n\n def __eq__(self, other) -> bool:\n if type(other) == DijkstraNode:\n return self.pos == other.pos\n return False\n\n def __ne__(self, other) -> bool:\n if type(other) == DijkstraNode:\n return self.pos != other.pos\n return True\n\n def __lt__(self, other) -> bool:\n return self.cost < other.cost\n\n def __gt__(self, other) -> bool:\n return self.cost > other.cost\n\n def __le__(self, other) -> bool:\n return self.cost <= other.cost\n\n def __ge__(self, other) -> bool:\n return self.cost >= other.cost\n\n def __hash__(self):\n return hash(self.pos)\n\nworld_collisions = WorldManager(Image.open(WORLD_FILE))\nworld_collisions.read_paths()\nagent_collisions = AgentCollisionManager(WORLD_EXTENTS[0], WORLD_EXTENTS[1])\n\ndef run_sim(percent_filled, time, draw_interval = 1, pathing = \"random\"):\n agents = []\n\n for gridpos in world_collisions.seating_pixels:\n for pos in [[gridpos[0]+0.25, gridpos[1]+0.25],[gridpos[0]+0.25, gridpos[1]+0.75],[gridpos[0]+0.75, gridpos[1]+0.25],[gridpos[0]+0.75, gridpos[1]+0.75]]:\n if random() < percent_filled:\n a = Agent((pos[0], pos[1]), AGENT_STEP, world_collisions.assign_goal(pathing,pos), 0)\n agents.append(a)\n agent_collisions.register_member(a)\n print(f\"simulating {len(agents)} agents\")\n\n agent_positions = [[] for _ in range(len(agents))]\n agent_counts = []\n\n plt.imshow(plt.imread(WORLD_FILE), extent=[0, WORLD_EXTENTS[0], 0, WORLD_EXTENTS[1]])\n plt.savefig(\"temp.png\")\n plt.clf()\n frame = cv2.imread(\"temp.png\")\n height, width, _ = frame.shape\n\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video = cv2.VideoWriter(\"movement.mp4\", fourcc, 30, (width, height))\n\n for t in range(time):\n print(str((t+1)/time * 100) + \"%\")\n done = True\n c = 0\n for i, a in enumerate(agents):\n if not a.finished:\n c += 1\n done = False\n a.update()\n if a.finished:\n agent_positions[i].append([0,0])\n continue\n if 
t%draw_interval == 0:\n agent_positions[i].append((a.x_pos, a.y_pos))\n agent_counts.append(c)\n \n if done:\n break\n \n if t%draw_interval == 0:\n plt.scatter(list(map(lambda x: x[-1][0], agent_positions)), list(map(lambda x: x[-1][1], agent_positions)), s=4)\n plt.imshow(plt.imread(WORLD_FILE), extent=[0, WORLD_EXTENTS[0], 0, WORLD_EXTENTS[1]])\n # plt.show()\n plt.savefig(\"temp.png\")\n plt.clf()\n video.write(cv2.imread(\"temp.png\"))\n\n cv2.destroyAllWindows()\n video.release()\n \n for pos_list in agent_positions:\n plt.plot(list(map(lambda x: x[0], pos_list[:-1])), list(map(lambda x: x[1], pos_list[:-1])), 'o-', linewidth=1, markersize=3)\n plt.imshow(plt.imread(WORLD_FILE), extent=[0, WORLD_EXTENTS[0], 0, WORLD_EXTENTS[1]])\n plt.show()\n\n print(agent_counts)\n\n return (t+1) * AGENT_STEP/1.4\n\nif __name__ == \"__main__\":\n max_time = 30*60*4 # ~half an hour\n\n # cProfile.run(\"run_sim(1.0, max_time, draw_interval = 20)\",\"profilestats\")\n with open(\"results.txt\",\"w\") as f:\n random_100 = run_sim(1.0, max_time, draw_interval = 10, pathing = 'random')\n print(f\"the agents exited the stadium in {random_100} seconds\")\n f.write(str(random_100) + \"\\n\")", "repo_name": "Footkick72/TSA", "sub_path": "agentCrowdSim.py", "file_name": "agentCrowdSim.py", "file_ext": "py", "file_size_in_byte": 19691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "94", "api": [{"api_name": "PIL.Image.open", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 256, "usage_type": "call"}, {"api_name": "os.path", "line_number": 256, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 258, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 284, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 304, "usage_type": "call"}, {"api_name": "heapq.heapify", "line_number": 345, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 350, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 361, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 435, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 435, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 453, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 455, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 455, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 456, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 459, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 460, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 482, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 482, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 483, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 483, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 483, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 485, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 485, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 486, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 486, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 487, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 489, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 493, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 493, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 494, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 494, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 494, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 495, "usage_type": "name"}]}